|
||||||||||
| PREV NEXT | FRAMES NO FRAMES | |||||||||
HFile readers.HFile writers.AccessController.AccessControlServiceCheckPermissionsRequestCheckPermissionsRequestCheckPermissionsResponseCheckPermissionsResponseGetUserPermissionsRequestGetUserPermissionsRequestGetUserPermissionsResponseGetUserPermissionsResponseGlobalPermissionGlobalPermissionGrantRequestGrantRequestGrantResponseGrantResponseNamespacePermissionNamespacePermissionPermissionPermission.ActionPermissionPermission.TypeRevokeRequestRevokeRequestRevokeResponseRevokeResponseTablePermissionTablePermissionUserPermissionUserPermissionUsersAndPermissionsUsersAndPermissionsUsersAndPermissions.UserPermissionsUsersAndPermissions.UserPermissionsFlushSnapshotSubprocedure.insideBarrier() step.
Put operation to the list of mutations
Delete operation to the list of mutations
Constraint to the table with the given configuration
WALEdit.add(Cell) instead
field to the sequence of accumulated fields.
repeated .Permission.Action action = 1;
repeated .Permission.Action action = 2;
repeated .Permission.Action action = 4;
repeated .Action action = 3;
repeated .Action action = 3;
repeated .Action action = 3;
repeated .Action action = 3;
repeated .Action action = 3;
repeated .Action action = 3;
repeated .Permission.Action action = 1;
repeated .Permission.Action action = 2;
repeated .Permission.Action action = 4;
repeated .Action action = 3;
repeated .NameBytesPair attribute = 3;
repeated .NameBytesPair attribute = 5;
repeated .NameBytesPair attribute = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
repeated bytes auth = 2;
repeated bytes auth = 2;
repeated uint32 auth = 2;
repeated .ServerName backup_masters = 8;
repeated .Cell cell = 1;
repeated uint32 cells_per_result = 1;
repeated .UUID cluster_ids = 8;
repeated .Column column = 2;
repeated .Column column = 1;
repeated .ColumnFamilySchema column_families = 3;
repeated bytes columns = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
repeated .MutationProto.ColumnValue column_value = 3;
repeated string compaction_input = 4;
repeated string compaction_output = 5;
repeated .NameStringPair configuration = 3;
repeated .NameStringPair configuration = 2;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .Coprocessor coprocessors = 6;
repeated .BytesBytesPair data = 3;
repeated string deadNodes = 2;
repeated .ServerName dead_servers = 3;
repeated .WALEntry entry = 1;
repeated bytes family = 2;
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
repeated .ServerName favored_node = 1;
repeated .ServerName favored_nodes = 3;
repeated .ServerName favored_nodes = 2;
repeated .Filter filters = 2;
repeated bytes first_part = 1;
repeated .BytesBytesPair fuzzy_keys_data = 1;
repeated bytes key_value_bytes = 2;
repeated string label = 1;
repeated bytes label = 1;
repeated string labels = 10;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
repeated .LiveServerInfo live_servers = 2;
repeated string locations = 2;
repeated .NameStringPair map_entries = 1;
repeated .BytesBytesPair map_entry = 1;
repeated .Coprocessor master_coprocessors = 6;
repeated .NameInt64Pair metrics = 1;
repeated .MutationProto mutation_request = 1;
repeated string name = 1;
repeated .NamespaceDescriptor namespaceDescriptor = 1;
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
repeated .OpenRegionResponse.RegionOpeningState opening_state = 1;
repeated .Permission permission = 1;
repeated .Permission permissions = 2;
repeated bytes qualifier = 2;
repeated bytes qualifiers = 1;
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
repeated .RegionAction regionAction = 1;
repeated .RegionActionResult regionActionResult = 1;
repeated .RegionInfo region_info = 1;
repeated .RegionInfo region_info = 2;
repeated .RegionLoad region_loads = 5;
repeated .SnapshotRegionManifest region_manifests = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
repeated .RegionInTransition regions_in_transition = 4;
repeated bytes region_to_flush = 1;
repeated .ReplicationLoadSource replLoadSource = 10;
repeated .RegionActionResult result = 1;
repeated .ResultOrException resultOrException = 1;
repeated .Result results = 5;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
repeated .FamilyScope scopes = 6;
repeated .SnapshotDescription snapshots = 1;
repeated bytes sorted_prefixes = 1;
repeated bytes split_keys = 2;
repeated string store_file = 1;
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
repeated .StoreSequenceId store_sequence_id = 2;
repeated .TableName tableName = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableSchema table_schema = 1;
repeated .TableSchema tableSchema = 1;
repeated int64 timestamps = 1 [packed = true];
repeated .StackTraceElementMessage trace = 4;
repeated .RegionStateTransition transition = 2;
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
repeated .UserAuthorizations userAuths = 1;
repeated .UserPermission user_permission = 1;
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
repeated .VisibilityLabel visLabel = 1;
repeated .NameBytesPair attribute = 3;
repeated .NameBytesPair attribute = 3;
repeated .NameBytesPair attribute = 3;
repeated .NameBytesPair attribute = 3;
repeated .NameBytesPair attribute = 5;
repeated .NameBytesPair attribute = 5;
repeated .NameBytesPair attribute = 5;
repeated .NameBytesPair attribute = 5;
repeated .NameBytesPair attribute = 2;
repeated .NameBytesPair attribute = 2;
repeated .NameBytesPair attribute = 2;
repeated .NameBytesPair attribute = 2;
repeated .NameBytesPair attribute = 3;
repeated .NameBytesPair attribute = 3;
repeated .NameBytesPair attribute = 5;
repeated .NameBytesPair attribute = 5;
repeated .NameBytesPair attribute = 2;
repeated .NameBytesPair attribute = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
repeated bytes auth = 2;
repeated bytes auth = 2;
repeated uint32 auth = 2;
repeated .ServerName backup_masters = 8;
repeated .ServerName backup_masters = 8;
repeated .ServerName backup_masters = 8;
repeated .ServerName backup_masters = 8;
repeated .ServerName backup_masters = 8;
repeated .ServerName backup_masters = 8;
repeated .Cell cell = 1;
repeated .Cell cell = 1;
repeated .Cell cell = 1;
repeated .Cell cell = 1;
repeated .Cell cell = 1;
repeated .Cell cell = 1;
repeated uint32 cells_per_result = 1;
repeated .UUID cluster_ids = 8;
repeated .UUID cluster_ids = 8;
repeated .UUID cluster_ids = 8;
repeated .UUID cluster_ids = 8;
repeated .UUID cluster_ids = 8;
repeated .UUID cluster_ids = 8;
repeated .Column column = 2;
repeated .Column column = 2;
repeated .Column column = 2;
repeated .Column column = 2;
repeated .Column column = 1;
repeated .Column column = 1;
repeated .Column column = 1;
repeated .Column column = 1;
rpc AddColumn(.AddColumnRequest) returns (.AddColumnResponse);
rpc AddColumn(.AddColumnRequest) returns (.AddColumnResponse);
repeated .Column column = 2;
repeated .Column column = 2;
repeated .Column column = 1;
repeated .Column column = 1;
repeated .ColumnFamilySchema column_families = 3;
repeated .ColumnFamilySchema column_families = 3;
repeated .ColumnFamilySchema column_families = 3;
repeated .ColumnFamilySchema column_families = 3;
repeated .ColumnFamilySchema column_families = 3;
repeated .ColumnFamilySchema column_families = 3;
repeated bytes columns = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
repeated .MutationProto.ColumnValue column_value = 3;
repeated .MutationProto.ColumnValue column_value = 3;
repeated .MutationProto.ColumnValue column_value = 3;
repeated .MutationProto.ColumnValue column_value = 3;
repeated .MutationProto.ColumnValue column_value = 3;
repeated .MutationProto.ColumnValue column_value = 3;
repeated string compaction_input = 4;
repeated string compaction_input = 4;
repeated string compaction_output = 5;
repeated string compaction_output = 5;
repeated .NameStringPair configuration = 3;
repeated .NameStringPair configuration = 3;
repeated .NameStringPair configuration = 3;
repeated .NameStringPair configuration = 3;
repeated .NameStringPair configuration = 2;
repeated .NameStringPair configuration = 2;
repeated .NameStringPair configuration = 2;
repeated .NameStringPair configuration = 2;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 3;
repeated .NameStringPair configuration = 3;
repeated .NameStringPair configuration = 2;
repeated .NameStringPair configuration = 2;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .Coprocessor coprocessors = 6;
repeated .Coprocessor coprocessors = 6;
repeated .Coprocessor coprocessors = 6;
repeated .Coprocessor coprocessors = 6;
repeated .Coprocessor coprocessors = 6;
repeated .Coprocessor coprocessors = 6;
repeated .BytesBytesPair data = 3;
repeated .BytesBytesPair data = 3;
repeated .BytesBytesPair data = 3;
repeated .BytesBytesPair data = 3;
repeated .BytesBytesPair data = 3;
repeated .BytesBytesPair data = 3;
repeated string deadNodes = 2;
repeated string deadNodes = 2;
repeated .ServerName dead_servers = 3;
repeated .ServerName dead_servers = 3;
repeated .ServerName dead_servers = 3;
repeated .ServerName dead_servers = 3;
repeated .ServerName dead_servers = 3;
repeated .ServerName dead_servers = 3;
repeated .WALEntry entry = 1;
repeated .WALEntry entry = 1;
repeated .WALEntry entry = 1;
repeated .WALEntry entry = 1;
repeated .WALEntry entry = 1;
repeated .WALEntry entry = 1;
repeated bytes family = 2;
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
repeated .ServerName favored_node = 1;
repeated .ServerName favored_node = 1;
repeated .ServerName favored_node = 1;
repeated .ServerName favored_node = 1;
repeated .ServerName favored_node = 1;
repeated .ServerName favored_node = 1;
repeated .ServerName favored_nodes = 3;
repeated .ServerName favored_nodes = 3;
repeated .ServerName favored_nodes = 3;
repeated .ServerName favored_nodes = 3;
repeated .ServerName favored_nodes = 2;
repeated .ServerName favored_nodes = 2;
repeated .ServerName favored_nodes = 2;
repeated .ServerName favored_nodes = 2;
repeated .ServerName favored_nodes = 3;
repeated .ServerName favored_nodes = 3;
repeated .ServerName favored_nodes = 2;
repeated .ServerName favored_nodes = 2;
repeated .Filter filters = 2;
repeated .Filter filters = 2;
repeated .Filter filters = 2;
repeated .Filter filters = 2;
repeated .Filter filters = 2;
repeated .Filter filters = 2;
repeated bytes first_part = 1;
repeated .BytesBytesPair fuzzy_keys_data = 1;
repeated .BytesBytesPair fuzzy_keys_data = 1;
repeated .BytesBytesPair fuzzy_keys_data = 1;
repeated .BytesBytesPair fuzzy_keys_data = 1;
repeated .BytesBytesPair fuzzy_keys_data = 1;
repeated .BytesBytesPair fuzzy_keys_data = 1;
Put.add(byte[], byte[], byte[]).
Put.add(byte[], byte[], long, byte[]).
Put.add(byte[], ByteBuffer, long, ByteBuffer).
repeated bytes key_value_bytes = 2;
repeated string label = 1;
repeated bytes label = 1;
repeated string label = 1;
rpc addLabels(.VisibilityLabelsRequest) returns (.VisibilityLabelsResponse);
rpc addLabels(.VisibilityLabelsRequest) returns (.VisibilityLabelsResponse);
repeated string labels = 10;
repeated string labels = 10;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
repeated .LiveServerInfo live_servers = 2;
repeated .LiveServerInfo live_servers = 2;
repeated .LiveServerInfo live_servers = 2;
repeated .LiveServerInfo live_servers = 2;
repeated .LiveServerInfo live_servers = 2;
repeated .LiveServerInfo live_servers = 2;
repeated string locations = 2;
repeated string locations = 2;
repeated .NameStringPair map_entries = 1;
repeated .NameStringPair map_entries = 1;
repeated .NameStringPair map_entries = 1;
repeated .NameStringPair map_entries = 1;
repeated .NameStringPair map_entries = 1;
repeated .NameStringPair map_entries = 1;
repeated .BytesBytesPair map_entry = 1;
repeated .BytesBytesPair map_entry = 1;
repeated .BytesBytesPair map_entry = 1;
repeated .BytesBytesPair map_entry = 1;
repeated .BytesBytesPair map_entry = 1;
repeated .BytesBytesPair map_entry = 1;
repeated .Coprocessor master_coprocessors = 6;
repeated .Coprocessor master_coprocessors = 6;
repeated .Coprocessor master_coprocessors = 6;
repeated .Coprocessor master_coprocessors = 6;
repeated .Coprocessor master_coprocessors = 6;
repeated .Coprocessor master_coprocessors = 6;
repeated .NameInt64Pair metrics = 1;
repeated .NameInt64Pair metrics = 1;
repeated .NameInt64Pair metrics = 1;
repeated .NameInt64Pair metrics = 1;
repeated .NameInt64Pair metrics = 1;
repeated .NameInt64Pair metrics = 1;
repeated .MutationProto mutation_request = 1;
repeated .MutationProto mutation_request = 1;
repeated .MutationProto mutation_request = 1;
repeated .MutationProto mutation_request = 1;
repeated .MutationProto mutation_request = 1;
repeated .MutationProto mutation_request = 1;
repeated string name = 1;
repeated string name = 1;
repeated .NamespaceDescriptor namespaceDescriptor = 1;
repeated .NamespaceDescriptor namespaceDescriptor = 1;
repeated .NamespaceDescriptor namespaceDescriptor = 1;
repeated .NamespaceDescriptor namespaceDescriptor = 1;
repeated .NamespaceDescriptor namespaceDescriptor = 1;
repeated .NamespaceDescriptor namespaceDescriptor = 1;
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
repeated .OpenRegionResponse.RegionOpeningState opening_state = 1;
AbstractHBaseTool.addOptWithArg(java.lang.String, java.lang.String)
and similar methods.
repeated .Permission permission = 1;
repeated .Permission permission = 1;
repeated .Permission permission = 1;
repeated .Permission permission = 1;
repeated .Permission permission = 1;
repeated .Permission permission = 1;
repeated .Permission permissions = 2;
repeated .Permission permissions = 2;
repeated .Permission permissions = 2;
repeated .Permission permissions = 2;
repeated .Permission permissions = 2;
repeated .Permission permissions = 2;
repeated bytes qualifier = 2;
repeated bytes qualifiers = 1;
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
repeated .RegionAction regionAction = 1;
repeated .RegionAction regionAction = 1;
repeated .RegionAction regionAction = 1;
repeated .RegionAction regionAction = 1;
repeated .RegionAction regionAction = 1;
repeated .RegionAction regionAction = 1;
repeated .RegionActionResult regionActionResult = 1;
repeated .RegionActionResult regionActionResult = 1;
repeated .RegionActionResult regionActionResult = 1;
repeated .RegionActionResult regionActionResult = 1;
repeated .RegionActionResult regionActionResult = 1;
repeated .RegionActionResult regionActionResult = 1;
repeated .RegionInfo region_info = 1;
repeated .RegionInfo region_info = 1;
repeated .RegionInfo region_info = 1;
repeated .RegionInfo region_info = 1;
repeated .RegionInfo region_info = 2;
repeated .RegionInfo region_info = 2;
repeated .RegionInfo region_info = 2;
repeated .RegionInfo region_info = 2;
repeated .RegionInfo region_info = 1;
repeated .RegionInfo region_info = 1;
repeated .RegionInfo region_info = 2;
repeated .RegionInfo region_info = 2;
repeated .RegionLoad region_loads = 5;
repeated .RegionLoad region_loads = 5;
repeated .RegionLoad region_loads = 5;
repeated .RegionLoad region_loads = 5;
repeated .RegionLoad region_loads = 5;
repeated .RegionLoad region_loads = 5;
repeated .SnapshotRegionManifest region_manifests = 2;
repeated .SnapshotRegionManifest region_manifests = 2;
repeated .SnapshotRegionManifest region_manifests = 2;
repeated .SnapshotRegionManifest region_manifests = 2;
repeated .SnapshotRegionManifest region_manifests = 2;
repeated .SnapshotRegionManifest region_manifests = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
repeated .RegionInTransition regions_in_transition = 4;
repeated .RegionInTransition regions_in_transition = 4;
repeated .RegionInTransition regions_in_transition = 4;
repeated .RegionInTransition regions_in_transition = 4;
repeated .RegionInTransition regions_in_transition = 4;
repeated .RegionInTransition regions_in_transition = 4;
repeated bytes region_to_flush = 1;
meta region.
repeated .ReplicationLoadSource replLoadSource = 10;
repeated .ReplicationLoadSource replLoadSource = 10;
repeated .ReplicationLoadSource replLoadSource = 10;
repeated .ReplicationLoadSource replLoadSource = 10;
repeated .ReplicationLoadSource replLoadSource = 10;
repeated .ReplicationLoadSource replLoadSource = 10;
repeated .RegionActionResult result = 1;
repeated .RegionActionResult result = 1;
repeated .RegionActionResult result = 1;
repeated .RegionActionResult result = 1;
repeated .RegionActionResult result = 1;
repeated .RegionActionResult result = 1;
repeated .ResultOrException resultOrException = 1;
repeated .ResultOrException resultOrException = 1;
repeated .ResultOrException resultOrException = 1;
repeated .ResultOrException resultOrException = 1;
repeated .ResultOrException resultOrException = 1;
repeated .ResultOrException resultOrException = 1;
repeated .Result results = 5;
repeated .Result results = 5;
repeated .Result results = 5;
repeated .Result results = 5;
repeated .Result results = 5;
repeated .Result results = 5;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
repeated .FamilyScope scopes = 6;
repeated .FamilyScope scopes = 6;
repeated .FamilyScope scopes = 6;
repeated .FamilyScope scopes = 6;
repeated .FamilyScope scopes = 6;
repeated .FamilyScope scopes = 6;
repeated .SnapshotDescription snapshots = 1;
repeated .SnapshotDescription snapshots = 1;
repeated .SnapshotDescription snapshots = 1;
repeated .SnapshotDescription snapshots = 1;
repeated .SnapshotDescription snapshots = 1;
repeated .SnapshotDescription snapshots = 1;
repeated bytes sorted_prefixes = 1;
repeated bytes split_keys = 2;
repeated string store_file = 1;
repeated string store_file = 1;
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
repeated .StoreSequenceId store_sequence_id = 2;
repeated .StoreSequenceId store_sequence_id = 2;
repeated .StoreSequenceId store_sequence_id = 2;
repeated .StoreSequenceId store_sequence_id = 2;
repeated .StoreSequenceId store_sequence_id = 2;
repeated .StoreSequenceId store_sequence_id = 2;
repeated .TableName tableName = 1;
repeated .TableName tableName = 1;
repeated .TableName tableName = 1;
repeated .TableName tableName = 1;
repeated .TableName tableName = 1;
repeated .TableName tableName = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableSchema table_schema = 1;
repeated .TableSchema table_schema = 1;
repeated .TableSchema table_schema = 1;
repeated .TableSchema table_schema = 1;
repeated .TableSchema tableSchema = 1;
repeated .TableSchema tableSchema = 1;
repeated .TableSchema tableSchema = 1;
repeated .TableSchema tableSchema = 1;
repeated .TableSchema table_schema = 1;
repeated .TableSchema table_schema = 1;
repeated .TableSchema tableSchema = 1;
repeated .TableSchema tableSchema = 1;
repeated int64 timestamps = 1 [packed = true];
repeated .StackTraceElementMessage trace = 4;
repeated .StackTraceElementMessage trace = 4;
repeated .StackTraceElementMessage trace = 4;
repeated .StackTraceElementMessage trace = 4;
repeated .StackTraceElementMessage trace = 4;
repeated .StackTraceElementMessage trace = 4;
repeated .RegionStateTransition transition = 2;
repeated .RegionStateTransition transition = 2;
repeated .RegionStateTransition transition = 2;
repeated .RegionStateTransition transition = 2;
repeated .RegionStateTransition transition = 2;
repeated .RegionStateTransition transition = 2;
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
repeated .UserAuthorizations userAuths = 1;
repeated .UserAuthorizations userAuths = 1;
repeated .UserAuthorizations userAuths = 1;
repeated .UserAuthorizations userAuths = 1;
repeated .UserAuthorizations userAuths = 1;
repeated .UserAuthorizations userAuths = 1;
repeated .UserPermission user_permission = 1;
repeated .UserPermission user_permission = 1;
repeated .UserPermission user_permission = 1;
repeated .UserPermission user_permission = 1;
repeated .UserPermission user_permission = 1;
repeated .UserPermission user_permission = 1;
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
repeated .VisibilityLabel visLabel = 1;
repeated .VisibilityLabel visLabel = 1;
repeated .VisibilityLabel visLabel = 1;
repeated .VisibilityLabel visLabel = 1;
repeated .VisibilityLabel visLabel = 1;
repeated .VisibilityLabel visLabel = 1;
ADMIN = 4;
AdminServiceCloseRegionRequestCloseRegionRequestCloseRegionResponseCloseRegionResponseCompactRegionRequestCompactRegionRequestCompactRegionResponseCompactRegionResponseFlushRegionRequestFlushRegionRequestFlushRegionResponseFlushRegionResponseGetOnlineRegionRequestGetOnlineRegionRequestGetOnlineRegionResponseGetOnlineRegionResponseGetRegionInfoRequestGetRegionInfoRequestGetRegionInfoResponseGetRegionInfoResponseGetRegionInfoResponse.CompactionStateGetServerInfoRequestGetServerInfoRequestGetServerInfoResponseGetServerInfoResponseGetStoreFileRequestGetStoreFileRequestGetStoreFileResponseGetStoreFileResponseMergeRegionsRequestMergeRegionsRequestMergeRegionsResponseMergeRegionsResponseOpenRegionRequestOpenRegionRequestOpenRegionRequest.RegionOpenInfoOpenRegionRequest.RegionOpenInfoOpenRegionResponseOpenRegionResponseOpenRegionResponse.RegionOpeningStateReplicateWALEntryRequestReplicateWALEntryRequestReplicateWALEntryResponseReplicateWALEntryResponseRollWALWriterRequestRollWALWriterRequestRollWALWriterResponseRollWALWriterResponseServerInfoServerInfoSplitRegionRequestSplitRegionRequestSplitRegionResponseSplitRegionResponseStopServerRequestStopServerRequestStopServerResponseStopServerResponseUpdateFavoredNodesRequestUpdateFavoredNodesRequestUpdateFavoredNodesRequest.RegionUpdateInfoUpdateFavoredNodesRequest.RegionUpdateInfoUpdateFavoredNodesResponseUpdateFavoredNodesResponseWALEntryWALEntryAggregateRequestAggregateRequestAggregateResponseAggregateResponseAggregateServiceALREADY_OPENED = 1;
AND = 1;
APPEND = 0;
val.
val.
val.
RegionPlacementMaintainer to print
information for favored nodesrpc AssignRegion(.AssignRegionRequest) returns (.AssignRegionResponse);
rpc AssignRegion(.AssignRegionRequest) returns (.AssignRegionResponse);
ASYNC_WAL = 2;
AuthenticationTokenSecretManager.AuthenticationKeyAuthenticationKeyAuthenticationServiceGetAuthenticationTokenRequestGetAuthenticationTokenRequestGetAuthenticationTokenResponseGetAuthenticationTokenResponseTokenTokenTokenIdentifierTokenIdentifierTokenIdentifier.KindWhoAmIRequestWhoAmIRequestWhoAmIResponseWhoAmIResponserpc Balance(.BalanceRequest) returns (.BalanceResponse);
rpc Balance(.BalanceRequest) returns (.BalanceResponse);
RpcExecutor that will balance requests evenly across all its queues, but still remains
efficient with a single queue via an inlinable queue balancing mechanism.HMaster.balance() when
needed.Base64.Base64InputStream will read data from another
InputStream, given in the constructor, and
encode/decode to/from Base64 notation on the fly.Base64.Base64InputStream in DECODE mode.
Base64.Base64InputStream in either ENCODE or DECODE mode.
Base64.Base64OutputStream will write data to another
OutputStream, given in the constructor, and
encode/decode to/from Base64 notation on the fly.Base64.Base64OutputStream in ENCODE mode.
Base64.Base64OutputStream in either ENCODE or DECODE mode.
BaseConfigurable.setConf(Configuration) in the constructor, but
only sets the configuration through the BaseConfigurable.setConf(Configuration)
methodConstraint.ReplicationEndpoints.HRegion.processRowsWithLocks(org.apache.hadoop.hbase.regionserver.RowProcessor, ?>, long, long) and Coprocessor endpoints.HTable.batch(List, Object[]) instead.
HTableInterface.batch(List, Object[]) instead.
Batch.Call.call(Object)
result.HTableInterface.batch(List, Object[]), but with a callback.
HTable.batchCallback(List, Object[], org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)
instead.
HTableInterface.batch(List, Object[]), but with a callback.
HTableInterface.batchCallback(List, Object[], org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)
instead.
Service subclass for each table
region spanning the range from the startKey row to endKey row (inclusive), all
the invocations to the same region server will be batched into one call.
Service subclass for each table
region spanning the range from the startKey row to endKey row (inclusive), all
the invocations to the same region server will be batched into one call.
Service subclass for each table
region spanning the range from the startKey row to endKey row (inclusive), all
the invocations to the same region server will be batched into one call.
Service subclass for each table
region spanning the range from the startKey row to endKey row (inclusive), all
the invocations to the same region server will be batched into one call.
MultiVersionConsistencyControl.WriteEntry with a new write number.
Bytes.compareTo(byte[], byte[]).BlockCacheRpcCallback implementation providing a
Future-like BlockingRpcCallback.get() method, which
will block util the instance's BlockingRpcCallback.run(Object) method has been called.HFile.
StoreFile.BOTTOM = 1;
Queue.BucketAllocatorBucketAllocator to allocate/free block, and use
BucketCache.ramCache and BucketCache.backingMap in order to
determine if a given element is in the cache.conf.
Subprocedure when requested.
BulkDeleteRequestBulkDeleteRequestBulkDeleteRequest.DeleteTypeBulkDeleteResponseBulkDeleteResponseBulkDeleteServicerpc BulkLoadHFile(.BulkLoadHFileRequest) returns (.BulkLoadHFileResponse);
rpc BulkLoadHFile(.BulkLoadHFileRequest) returns (.BulkLoadHFileResponse);
ByteBufferArrayByteRange.Bytes.ByteArrayComparator that treats the empty array as the largest value.CacheableDeserializerBucketAllocator.allocateBlock(int) when cache is full for
the requested sizeSubprocedure.acquireBarrier() and Subprocedure.insideBarrier() methods
while keeping some state for other threads to access.
RpcScheduler.Progressable but returns
a boolean to support canceling the operation.hbase:meta.org.apache.hadoop.hbase.rest.protobuf.generated.Cellorg.apache.hadoop.hbase.rest.protobuf.generated.CellCellCellCellTypeKeyValueKeyValueCell inside a sorted collection of cells.org.apache.hadoop.hbase.rest.protobuf.generated.CellSetorg.apache.hadoop.hbase.rest.protobuf.generated.CellSetorg.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Roworg.apache.hadoop.hbase.rest.protobuf.generated.CellSet.RowCell instances.WALEntryFilter which contains multiple filters and applies them
in chain orderReadableByteChannel.read(java.nio.ByteBuffer).
WritableByteChannel.write(java.nio.ByteBuffer).
Put to ensure it is valid for the table.
HFile format version, and throws an exception if
invalid.
rpc CheckPermissions(.CheckPermissionsRequest) returns (.CheckPermissionsResponse);
rpc CheckPermissions(.CheckPermissionsRequest) returns (.CheckPermissionsResponse);
rpc CleanupBulkLoad(.CleanupBulkLoadRequest) returns (.CleanupBulkLoadResponse);
rpc CleanupBulkLoad(.CleanupBulkLoadRequest) returns (.CleanupBulkLoadResponse);
repeated .Permission.Action action = 1;
repeated .Permission.Action action = 2;
repeated .Permission.Action action = 4;
repeated .Action action = 3;
required uint64 ageOfLastAppliedOp = 1;
required uint64 ageOfLastShippedOp = 2;
required string algorithm = 1;
optional bool assign_seq_num = 3;
optional bool assign_seq_num = 2;
optional int32 associated_cell_count = 3;
optional int32 associated_cell_count = 8;
optional int32 associated_cell_count = 2;
optional bool atomic = 2;
repeated .NameBytesPair attribute = 3;
repeated .NameBytesPair attribute = 5;
repeated .NameBytesPair attribute = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
repeated bytes auth = 2;
repeated bytes auth = 2;
repeated uint32 auth = 2;
optional string auth_method = 2;
rpc clearAuths(.SetAuthsRequest) returns (.VisibilityLabelsResponse);
rpc clearAuths(.SetAuthsRequest) returns (.VisibilityLabelsResponse);
optional double averageLoad = 5;
repeated .ServerName backup_masters = 8;
optional bool balancer_on = 9;
optional bool balancer_on = 1;
required bool balancer_ran = 1;
optional int32 batch = 4;
optional uint32 batch_size = 9;
required bytes bigdecimal_msg = 1;
required .BitComparator.BitwiseOp bitwise_op = 2;
required string bulk_token = 1;
required string bulk_token = 1;
required string bulk_token = 4;
optional bool cache_blocks = 7 [default = true];
optional bool cache_blocks = 8 [default = true];
optional bool cacheBlocks = 11;
sn.
optional uint32 caching = 17;
optional int32 caching = 9;
required .CoprocessorServiceCall call = 2;
optional uint32 call_id = 1;
optional uint32 call_id = 1;
repeated .Cell cell = 1;
optional string cell_block_codec_class = 3;
optional string cell_block_compressor_class = 4;
optional .CellBlockMeta cell_block_meta = 5;
optional .CellBlockMeta cell_block_meta = 3;
optional string cell_codec_cls_name = 5;
repeated uint32 cells_per_result = 1;
optional .CellType cell_type = 5;
required float chance = 1;
required string charset = 3;
optional string class_name = 1;
required bool closed = 1;
optional bool close_scanner = 5;
optional bool closest_row_before = 11 [default = false];
required string cluster_id = 1;
optional .ClusterId cluster_id = 5;
repeated .UUID cluster_ids = 8;
required string clusterkey = 1;
required .ClusterStatus cluster_status = 1;
repeated .Column column = 2;
repeated .Column column = 1;
optional bytes column = 2;
repeated .ColumnFamilySchema column_families = 3;
required .ColumnFamilySchema column_families = 2;
required .ColumnFamilySchema column_families = 2;
optional bytes column_family = 2;
optional bytes column_family = 1;
required bytes column_name = 2;
optional bytes column_offset = 3;
optional bytes column_qualifier = 3;
optional bytes column_qualifier = 2;
repeated bytes columns = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
repeated .MutationProto.ColumnValue column_value = 3;
repeated string compaction_input = 4;
repeated string compaction_output = 5;
optional bool compaction_state = 2;
optional .GetRegionInfoResponse.CompactionState compaction_state = 2;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .Comparator comparator = 5;
optional .Comparator comparator = 2;
required .Comparator comparator = 4;
optional string comparator_class_name = 11;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareType compare_op = 1;
required .CompareType compare_op = 3;
required .CompareType compare_type = 4;
optional uint64 complete_sequence_id = 15;
optional string compression = 5;
optional uint32 compression_codec = 12;
optional .Condition condition = 3;
optional .Condition condition = 3;
repeated .NameStringPair configuration = 3;
repeated .NameStringPair configuration = 2;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .Coprocessor coprocessors = 6;
required int64 count = 1 [default = 0];
required uint64 create_time = 3;
optional int64 create_time = 6;
optional int64 creation_time = 3 [default = 0];
optional int64 creation_time = 3 [default = 0];
optional uint64 current_compacted_KVs = 11;
optional int64 currentCompactedKVs = 13;
required bytes data = 3;
repeated .BytesBytesPair data = 3;
optional bytes data = 4;
optional uint32 data_index_count = 5;
optional float data_locality = 16;
required string date = 5;
repeated string deadNodes = 2;
repeated .ServerName dead_servers = 3;
optional string declaring_class = 1;
required .BulkDeleteRequest.DeleteType deleteType = 2;
optional .MutationProto.DeleteType delete_type = 4;
optional .ServerName destination_server = 4;
optional .ServerName dest_server_name = 2;
optional bool done = 1 [default = false];
optional bool done = 1 [default = false];
optional bool done = 1 [default = false];
optional bool do_not_retry = 5;
required double double_msg = 1;
optional bool drop_dependent_column = 4;
optional .MutationProto.Durability durability = 6 [default = USE_DEFAULT];
required string effective_user = 1;
required bool enable = 1;
required bool enabled = 1;
required bytes encoded_region_name = 2;
required bytes encoded_region_name = 1;
optional bytes encryption_key = 13;
optional bytes encryption_key = 2;
optional bytes end_key = 4;
optional bytes endKey = 3;
optional bytes endRow = 2;
optional int64 endTime = 6;
optional string engine = 4;
repeated .WALEntry entry = 1;
optional uint64 entry_count = 7;
optional bytes error_info = 3;
optional string error_message = 1;
required string error_message = 2;
required uint32 event_type_code = 1;
optional .NameBytesPair exception = 2;
optional .NameBytesPair exception = 3;
optional .ExceptionResponse exception = 2;
optional string exception_class_name = 1;
optional bool existence_only = 10 [default = false];
optional bool exists = 3;
required int64 expected_timeout = 1;
required int64 expected_timeout = 1;
required int64 expiration_date = 2;
optional int64 expiration_date = 5;
required string expression = 1;
optional bytes family = 2;
optional bytes family = 3;
repeated bytes family = 2;
optional bytes family = 2;
required bytes family = 2;
required bytes family = 1;
required bytes family = 1;
required bytes family = 2;
required bytes family = 1;
required bytes family = 1;
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
required bytes family_name = 1;
required bytes family_name = 3;
required bytes family_name = 1;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
repeated .ServerName favored_node = 1;
repeated .ServerName favored_nodes = 3;
repeated .ServerName favored_nodes = 2;
optional uint64 file_info_offset = 1;
optional string file_name = 3;
optional uint64 file_size = 3;
optional .Filter filter = 4;
optional .Filter filter = 5;
required .Filter filter = 1;
required .Filter filter = 1;
required .Filter filter = 1;
optional string filter = 8;
optional bool filter_if_missing = 5;
repeated .Filter filters = 2;
required bytes first = 1;
optional uint64 first_data_block_offset = 9;
repeated bytes first_part = 1;
optional bool flushed = 2;
optional uint32 following_kv_count = 7;
optional bool force = 2 [default = false];
optional bool forcible = 3 [default = false];
optional bool forcible = 3 [default = false];
optional uint64 from = 1;
required .DelegationToken fs_token = 3;
repeated .BytesBytesPair fuzzy_keys_data = 1;
optional .GenericExceptionMessage generic_exception = 2;
optional .Get get = 3;
required .Get get = 2;
optional .GlobalPermission global_permission = 2;
optional bool has_compression = 1;
optional bytes hash = 5;
optional bool has_tag_compression = 3;
optional .HBaseVersionFileContent hbase_version = 1;
optional int32 heapOccupancy = 2 [default = 0];
optional int32 heapSizeMB = 4;
optional string hfile = 3;
required string host_name = 1;
optional string hostname = 3;
required int32 id = 1;
optional int64 id = 4;
optional bytes identifier = 1;
optional bytes identifier = 1;
optional uint64 if_older_than_ts = 2;
optional uint32 index = 1;
optional uint32 index = 1;
optional int32 infoPort = 1;
optional uint32 info_server_port = 9;
optional bool inMemory = 4;
optional string instance = 2;
required string interpreter_class_name = 1;
optional bytes interpreter_specific_bytes = 3;
required bool is_master_running = 1;
optional bool isRecovering = 3;
optional bool is_shared = 4;
optional int64 issue_date = 4;
optional bytes iv = 4;
optional string jerseyVersion = 5;
optional string jvmVersion = 2;
required .WALKey key = 1;
required bytes key = 3;
required bytes key = 1;
required int32 key_id = 3;
optional .CellType key_type = 5;
repeated bytes key_value_bytes = 2;
required .TokenIdentifier.Kind kind = 1;
optional string kind = 3;
repeated string label = 1;
repeated bytes label = 1;
required bytes label = 1;
repeated string labels = 10;
optional uint64 last_data_block_offset = 10;
required uint64 last_flushed_sequence_id = 1;
required uint64 last_flushed_sequence_id = 1;
required uint64 last_flush_time = 1;
optional bool latest_version_only = 6;
required uint64 least_sig_bits = 1;
required bool len_as_val = 1;
required uint32 length = 2;
optional uint32 length = 1;
required int32 limit = 1;
required int32 limit = 1;
optional int32 line_number = 4;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
repeated .LiveServerInfo live_servers = 2;
optional .ServerLoad load = 2;
optional bool load_column_families_on_demand = 13;
required bool loaded = 1;
required bool loaded = 1;
optional uint64 load_on_open_data_offset = 2;
optional .RegionLoadStats loadStats = 5;
optional string location = 5;
repeated string locations = 2;
required string lock_owner = 1;
optional .ServerName lock_owner = 2;
required uint64 log_sequence_number = 3;
required int64 long_msg = 1;
optional bool major = 2;
repeated .NameStringPair map_entries = 1;
repeated .BytesBytesPair map_entry = 1;
optional .ServerName master = 7;
required .ServerName master = 1;
repeated .Coprocessor master_coprocessors = 6;
optional bytes max_column = 3;
optional bool max_column_inclusive = 4;
optional uint32 max_heap_MB = 4;
optional int32 maxHeapSizeMB = 5;
optional uint64 max_result_size = 10;
optional uint32 max_versions = 6 [default = 1];
optional uint32 max_versions = 7 [default = 1];
optional int32 maxVersions = 4;
optional int32 maxVersions = 7;
optional int32 memstoreLoad = 1 [default = 0];
optional uint32 memstore_size_MB = 6;
optional int32 memstoreSizeMB = 5;
optional string message = 2;
optional uint32 meta_index_count = 6;
required string method_name = 3;
optional string method_name = 2;
optional string method_name = 3;
repeated .NameInt64Pair metrics = 1;
optional bytes min_column = 1;
optional bool min_column_inclusive = 2;
optional .SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN];
optional bool more_results = 3;
optional bool more_results_in_region = 8;
required uint64 most_sig_bits = 2;
optional .MutationProto.MutationType mutate_type = 2;
optional .MutationProto mutation = 2;
required .MutationProto mutation = 2;
repeated .MutationProto mutation_request = 1;
required string name = 1;
required string name = 1;
required bytes name = 1;
required string name = 1;
required string name = 1;
optional string name = 1;
required bytes name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
optional string name = 1;
required string name = 1;
required bytes name = 1;
required string name = 1;
required string name = 1;
repeated string name = 1;
required string name = 1;
optional string name = 1;
required bytes namespace = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
repeated .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
optional bytes namespace_name = 3;
optional bytes namespace_name = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
optional .NamespacePermission namespace_permission = 3;
optional uint64 next_call_seq = 6;
optional uint64 nonce = 9;
optional uint64 nonce = 3;
optional uint64 nonce = 5;
optional uint64 nonce = 10;
optional uint64 nonceGroup = 2;
optional uint64 nonce_group = 4;
optional uint64 nonce_group = 2;
optional uint64 nonce_group = 4;
optional uint64 nonceGroup = 9;
optional uint32 number_of_requests = 1;
optional uint32 number_of_rows = 4;
optional uint32 num_data_index_levels = 8;
optional bool offline = 5;
optional int32 offset = 2;
required bool on = 1;
optional bool openForDistributedLogReplay = 4;
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
repeated .OpenRegionResponse.RegionOpeningState opening_state = 1;
optional uint64 open_seq_num = 3;
required .FilterList.Operator operator = 1;
optional uint32 ordinal = 2;
optional string osVersion = 3;
required int64 page_size = 1;
optional int64 parent_id = 2;
optional bytes password = 2;
optional bytes password = 2;
required string path = 2;
required string pattern = 1;
required int32 pattern_flags = 2;
optional bytes payload = 5;
required string peerID = 1;
repeated .Permission permission = 1;
required .Permission permission = 3;
repeated .Permission permissions = 2;
optional uint32 port = 2;
required uint32 port = 1;
optional int32 port = 4;
required int64 position = 1;
required bytes prefix = 1;
optional bytes prefix = 1;
optional bool preserveSplits = 2 [default = false];
optional bool prev_balance_value = 1;
optional bool prev_value = 1;
optional uint32 priority = 6;
required .ProcedureDescription procedure = 1;
optional .ProcedureDescription procedure = 1;
optional bool processed = 2;
optional bool processed = 2;
optional string purpose = 5;
optional bytes qualifier = 3;
optional bytes qualifier = 3;
required bytes qualifier = 3;
repeated bytes qualifier = 2;
required bytes qualifier = 3;
optional bytes qualifier = 1;
required bytes qualifier = 2;
repeated bytes qualifiers = 1;
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
required .Reference.Range range = 2;
optional bool readOnly = 5;
optional uint64 read_requests_count = 8;
optional int64 readRequestsCount = 7;
optional string real_user = 2;
required string reason = 1;
optional .Reference reference = 2;
optional string regex = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionInfo region = 1;
required .RegionSpecifier region = 1;
required .RegionInfo region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
optional .RegionSpecifier region = 1;
optional .RegionInfo region = 4;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region_a = 1;
required .RegionSpecifier region_a = 1;
repeated .RegionAction regionAction = 1;
repeated .RegionActionResult regionActionResult = 1;
required .RegionSpecifier region_b = 2;
required .RegionSpecifier region_b = 2;
tableName
required uint64 region_id = 1;
repeated .RegionInfo region_info = 1;
required .RegionInfo region_info = 1;
required .RegionInfo region_info = 1;
repeated .RegionInfo region_info = 2;
required .RegionInfo region_info = 2;
repeated .RegionLoad region_loads = 5;
repeated .SnapshotRegionManifest region_manifests = 2;
required bytes region_name = 1;
optional bytes region_name = 7;
required bytes region_name = 2;
optional int32 regions = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
repeated .RegionInTransition regions_in_transition = 4;
required .RegionSpecifier region_specifier = 1;
required .RegionState region_state = 2;
repeated bytes region_to_flush = 1;
optional string replicationEndpointImpl = 2;
required uint64 replicationLag = 5;
optional .ReplicationLoadSink replLoadSink = 11;
repeated .ReplicationLoadSource replLoadSource = 10;
optional uint64 report_end_time = 8;
optional uint64 report_start_time = 7;
required bytes request = 4;
optional bool request_param = 4;
optional int32 requests = 4;
optional int32 requests = 3;
optional uint32 response = 1;
optional string restVersion = 1;
optional .Result result = 1;
optional .Result result = 1;
optional .Result result = 2;
repeated .RegionActionResult result = 1;
repeated .ResultOrException resultOrException = 1;
repeated .Result results = 5;
optional bool reversed = 15 [default = false];
required string revision = 3;
optional uint32 root_index_size_KB = 12;
optional int32 rootIndexSizeKB = 9;
optional bytes row = 1;
required bytes row = 1;
required bytes row = 1;
required bytes row = 1;
required bytes row = 1;
optional bytes row = 1;
optional bytes row = 1;
required uint32 rowBatchSize = 4;
required string row_processor_class_name = 1;
optional bytes row_processor_initializer_message = 3;
optional string row_processor_initializer_message_name = 2;
required bytes row_processor_result = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
required uint64 rowsDeleted = 1;
optional uint32 rpc_version = 2;
optional uint32 rpc_version = 2;
required .Scan scan = 1;
required .Scan scan = 2;
optional .Scan scan = 2;
optional uint64 scanner_id = 3;
optional uint64 scanner_id = 2;
optional int32 scan_result = 1;
repeated .FamilyScope scopes = 6;
required .ScopeType scope_type = 2;
required bytes second = 2;
optional bytes second_part = 2;
required uint64 sequence_id = 2;
optional int64 sequence_number = 6;
optional bytes serialized_comparator = 2;
optional bytes serialized_filter = 2;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required uint64 server_current_time = 3;
required .ServerInfo server_info = 1;
required .ServerLoad server_load = 2;
required .ServerName server_name = 1;
required .ServerName server_name = 4;
required .ServerName server_name = 2;
optional uint64 serverStartCode = 5;
optional uint64 serverStartCode = 2;
required uint64 server_start_code = 2;
optional string serverVersion = 4;
optional bytes service = 3;
optional string service = 4;
optional .CoprocessorServiceCall service_call = 4;
required string service_name = 2;
optional string service_name = 2;
optional .CoprocessorServiceResult service_result = 4;
required string signature = 1;
required .SingleColumnValueFilter single_column_value_filter = 1;
required uint32 sizeOfLogQueue = 3;
optional bool small = 14;
required .SnapshotDescription snapshot = 1;
optional .ProcedureDescription snapshot = 2;
optional .SnapshotDescription snapshot = 1;
optional .SnapshotDescription snapshot = 1;
optional .SnapshotDescription snapshot = 2;
required .SnapshotDescription snapshot = 1;
required .SnapshotDescription snapshot = 1;
repeated .SnapshotDescription snapshots = 1;
repeated bytes sorted_prefixes = 1;
optional string source = 1;
required .RegionSpecifier spec = 1;
optional bool split = 6;
required bytes splitkey = 1;
repeated bytes split_keys = 2;
optional bytes split_point = 2;
required string src_checksum = 6;
optional string stack_trace = 2;
optional uint64 stamp = 3;
optional uint64 start_code = 3;
optional int64 startCode = 2;
required string start_date = 1;
optional bytes start_key = 3;
optional bytes startKey = 2;
optional bytes start_row = 3;
optional bytes startRow = 1;
optional int64 startTime = 5;
required .RegionState.State state = 2;
optional .RegionState.State state = 3;
required .ReplicationState.State state = 1;
required .SplitLogTask.State state = 1;
required .Table.State state = 1 [default = ENABLED];
optional bytes stop_row = 4;
optional bytes stop_row_key = 1;
repeated string store_file = 1;
optional uint32 storefile_index_size_MB = 7;
optional int32 storefileIndexSizeMB = 6;
optional uint32 storefiles = 3;
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
optional int32 storefiles = 3;
optional uint32 storefile_size_MB = 5;
optional int32 storefileSizeMB = 4;
required string store_home_dir = 6;
optional uint32 store_limit = 8;
optional uint32 store_limit = 11;
optional uint32 store_offset = 9;
optional uint32 store_offset = 12;
optional uint32 stores = 2;
optional int32 stores = 2;
repeated .StoreSequenceId store_sequence_id = 2;
optional uint32 store_uncompressed_size_MB = 4;
required string substr = 1;
optional bool synchronous = 2;
optional string table = 2;
optional .TableSchema table = 3;
optional .TableName table_name = 2;
optional .TableName table_name = 1;
required .TableName table_name = 2;
optional .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
repeated .TableName tableName = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName tableName = 1;
required .TableName table_name = 1;
required bytes table_name = 1;
required bytes table_name = 2;
optional .TableName table_name = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
optional .TablePermission table_permission = 4;
required .TableSchema table_schema = 1;
repeated .TableSchema table_schema = 1;
repeated .TableSchema tableSchema = 1;
required .TableSchema table_schema = 2;
required .TableSchema table_schema = 1;
optional bytes tags = 7;
optional bytes tags = 7;
optional bytes tags = 5;
optional int64 thread_id = 3;
optional .TimeRange time_range = 5;
optional .TimeRange time_range = 7;
optional .TimeRange time_range = 6;
optional uint64 timestamp = 3;
optional uint64 timestamp = 4;
optional uint64 timestamp = 4;
optional uint64 timestamp = 4;
optional uint64 timestamp = 3;
optional int64 timestamp = 3;
required uint64 timeStampOfLastShippedOp = 4;
repeated int64 timestamps = 1 [packed = true];
required uint64 timeStampsOfLastAppliedOp = 2;
optional uint64 to = 2;
optional .Token token = 1;
optional uint64 total_compacting_KVs = 10;
optional int64 totalCompactingKVs = 12;
optional uint32 total_number_of_requests = 2;
optional uint32 total_regions = 2;
optional uint32 total_static_bloom_size_KB = 14;
optional int32 totalStaticBloomSizeKB = 11;
optional uint32 total_static_index_size_KB = 13;
optional int32 totalStaticIndexSizeKB = 10;
optional uint64 total_uncompressed_bytes = 4;
repeated .StackTraceElementMessage trace = 4;
optional int64 trace_id = 1;
optional .RPCTInfo trace_info = 2;
repeated .RegionStateTransition transition = 2;
required .RegionStateTransition.TransitionCode transition_code = 1;
optional bool transition_in_ZK = 3 [default = true];
optional uint32 ttl = 4;
optional int32 ttl = 3;
optional .Permission.Type type = 1;
required .Permission.Type type = 1;
required .RegionSpecifier.RegionSpecifierType type = 1;
optional .SnapshotDescription.Type type = 4 [default = FLUSH];
required .SnapshotFileInfo.Type type = 1;
optional uint64 uncompressed_data_index_size = 3;
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
required string url = 2;
optional uint32 used_heap_MB = 3;
required bytes user = 1;
required bytes user = 1;
required string user = 4;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
repeated .UserAuthorizations userAuths = 1;
optional .UserInformation user_info = 1;
required bytes username = 2;
optional string username = 1;
repeated .UserPermission user_permission = 1;
required .UserPermission user_permission = 1;
required .UserPermission user_permission = 1;
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
optional bytes value = 6;
optional bytes value = 6;
required .NameBytesPair value = 2;
optional .NameBytesPair value = 1;
optional bytes value = 2;
optional bytes value = 1;
optional bytes value = 2;
optional int64 value = 2;
required string value = 2;
required bytes value = 2;
required bool value = 1;
required string value = 2;
required string value = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
required string version = 1;
optional int32 version = 5;
required string version = 1;
optional int32 version = 1;
optional .VersionInfo version_info = 5;
optional uint32 version_of_closing_node = 2;
optional uint32 version_of_offline_node = 2;
optional uint64 versionsDeleted = 2;
repeated .VisibilityLabel visLabel = 1;
optional string wal_name = 5;
optional string wal_server = 4;
optional uint32 webui_port = 2;
optional string writer_cls_name = 4;
optional uint64 write_requests_count = 9;
optional int64 writeRequestsCount = 8;
required uint64 write_time = 4;
optional uint32 yet_to_update_regions = 1;
ActionActionAuthorizationsAuthorizationsBulkLoadHFileRequestBulkLoadHFileRequestBulkLoadHFileRequest.FamilyPathBulkLoadHFileRequest.FamilyPathBulkLoadHFileResponseBulkLoadHFileResponseCellVisibilityCellVisibilityClientServiceColumnColumnConditionConditionCoprocessorServiceCallCoprocessorServiceCallCoprocessorServiceRequestCoprocessorServiceRequestCoprocessorServiceResponseCoprocessorServiceResponseCoprocessorServiceResultCoprocessorServiceResultGetGetGetRequestGetRequestGetResponseGetResponseMultiRequestMultiRequestMultiResponseMultiResponseMutateRequestMutateRequestMutateResponseMutateResponseMutationProtoMutationProtoMutationProto.ColumnValueMutationProto.ColumnValueMutationProto.ColumnValue.QualifierValueMutationProto.ColumnValue.QualifierValueMutationProto.DeleteTypeMutationProto.DurabilityMutationProto.MutationTypeRegionActionRegionActionRegionActionResultRegionActionResultRegionLoadStatsRegionLoadStatsResultResultResultOrExceptionResultOrExceptionScanScanScanRequestScanRequestScanResponseScanResponseClientScanner.ClientScanner(Configuration, Scan, TableName)
Scan's start row maybe changed changed.
ClientScanner.ClientScanner(Configuration, Scan, TableName, HConnection)
ClientScanner.ClientScanner(Configuration, Scan, TableName, HConnection,
RpcRetryingCallerFactory, RpcControllerFactory)
instead
Scan's start
row maybe changed changed.
Scan's start row maybe changed.
Scan's start row maybe changed changed.
HRegionInfo from the snapshot region info.
CLOSED = 6;
CLOSED = 2;
HRegion.createHRegion(HRegionInfo, Path, Configuration, HTableDescriptor)
requires.
outputStream (and so will close it).
rpc CloseRegion(.CloseRegionRequest) returns (.CloseRegionResponse);
rpc CloseRegion(.CloseRegionRequest) returns (.CloseRegionResponse);
HTablePool.closeTablePool(String).
CLOSING = 5;
ClusterIdClusterIdClusterStatusClusterStatusLiveServerInfoLiveServerInfoRegionInTransitionRegionInTransitionRegionLoadRegionLoadRegionStateRegionStateRegionState.StateReplicationLoadSinkReplicationLoadSinkReplicationLoadSourceReplicationLoadSourceServerLoadServerLoadOrder.
COLUMN = 2;
org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaorg.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attributeorg.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attributeorg.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaLruBlockCache and BucketCache.rpc CompactRegion(.CompactRegionRequest) returns (.CompactRegionResponse);
rpc CompactRegion(.CompactRegionRequest) returns (.CompactRegionResponse);
BinaryComparatorBinaryComparatorBinaryPrefixComparatorBinaryPrefixComparatorBitComparatorBitComparator.BitwiseOpBitComparatorByteArrayComparableByteArrayComparableComparatorComparatorLongComparatorLongComparatorNullComparatorNullComparatorRegexStringComparatorRegexStringComparatorSubstringComparatorSubstringComparatorKeyValue for keys in hbase:meta
table.
MultiVersionConsistencyControl.WriteEntry that was created by MultiVersionConsistencyControl.beginMemstoreInsert().
ByteBloomFilter,
encapsulating a set of fixed-size Bloom filters written out at the time of
HFile generation into the data
block stream, and loaded on demand at query time.HFile to the
CompoundBloomFilter class.ConcurrentSkipListSet
Import.CF_RENAME_PROP in conf that tells
the mapper how to rename column families.
ZKConfig.
RegionSplitPolicy implementation which splits a region
as soon as any of its store files exceeds a maximum configurable
size.Constraint (in traditional database terminology) to a HTable.Put.Constraints on a given table.DeserializationException to a more palatable KeeperException.
Services.RpcChannel instance
connected to the active master.
RpcChannel instance
connected to the passed region server.
RpcChannel instance connected to the
table region containing the specified row.
Service subclass for each table
region spanning the range from the startKey row to endKey row (inclusive),
and invokes the passed Batch.Call.call(T)
method with each Service
instance.
Service subclass for each table
region spanning the range from the startKey row to endKey row (inclusive),
and invokes the passed Batch.Call.call(T)
method with each Service instance.
RpcChannel instance connected to the
table region containing the specified row.
Service subclass for each table
region spanning the range from the startKey row to endKey row (inclusive),
and invokes the passed Batch.Call.call(T)
method with each Service
instance.
Service subclass for each table
region spanning the range from the startKey row to endKey row (inclusive),
and invokes the passed Batch.Call.call(T)
method with each Service instance.
Service instance via CoprocessorService.getService().DescriptiveStatistics.
length from in
RpcScheduler.
WALCellCodec from the cellCodecClsName and
CompressionContext, if cellCodecClsName is specified.
WALCellCodec from the
CompressionContext.
UserGroupInformation instance.
UserGroupInformation instance.
CREATE = 3;
null and sets the environment in the new or existing instance.
conf instance.
conf instance.
conf instance.
conf instance.
StoreFile writing.
ServerManager.createDestinationServersList(org.apache.hadoop.hbase.ServerName) without server to exclude.
StoreFile writing.
KeyValue.createLastOnRow(byte[], int, int, byte[], int, int,
byte[], int, int) but creates the last key on the row/column of this KV
(the value part of the returned KV is always empty).
JVMClusterUtil.MasterThread.
rpc CreateNamespace(.CreateNamespaceRequest) returns (.CreateNamespaceResponse);
rpc CreateNamespace(.CreateNamespaceRequest) returns (.CreateNamespaceResponse);
JVMClusterUtil.RegionServerThread.
CopyTable.createSubmittableJob(String[]) instead
rpc CreateTable(.CreateTableRequest) returns (.CreateTableResponse);
rpc CreateTable(.CreateTableRequest) returns (.CreateTableResponse);
User instance specifically for use in test code.
HLog.Writer for writing log splits.
EnvironmentEdge.currentTimeMillis() method.
RpcRetryingCaller
DataType is the base class for all HBase data types.T from the buffer src.
byte[] from the buffer src.
byte[] from the buffer src.
byte[] from the buffer src.
index.
src.
src.
byte value from the buffer src.
byte value from the buffer buff.
src.
src.
double value from the buffer src.
double value from the buffer src.
double value from the buffer buff.
float value from the buffer dst.
float value from the buffer buff.
int value from the buffer src.
int value from the buffer buff.
int16 value.
int32 value.
int64 value.
int8 value.
long value from the buffer src.
long value from the buffer src.
long value from the buffer buff.
BigDecimal value from the variable-length encoding.
double value from the Numeric encoding.
long value from the Numeric encoding.
RemoteException.unwrapRemoteException() instead.
In fact we should look into deprecating this whole class - St.Ack 2010929
short value from the buffer src.
short value from the buffer buff.
ByteRange with new backing byte[] containing a copy
of the content from this range's window.
HConstants.HBASE_CLIENT_MAX_PERREGION_TASKS.
HConstants.HBASE_CLIENT_MAX_PERSERVER_TASKS.
HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS.
HConstants.HBASE_CLIENT_PAUSE.
HConstants.HBASE_CLIENT_PREFETCH.
HConstants.HBASE_CLIENT_PREFETCH_LIMIT.
HConstants.HBASE_CLIENT_RETRIES_NUMBER.
HConstants.HBASE_CLIENT_SCANNER_CACHING
HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD.
HConstants.HBASE_META_BLOCK_SIZE.
HConstants.HBASE_META_SCANNER_CACHING.
HConstants.HBASE_META_VERSIONS.
HConstants.HBASE_RPC_SHORTOPERATION_TIMEOUT_KEY
HConstants.HBASE_RPC_TIMEOUT_KEY
HConstants.HBASE_SERVER_PAUSE.
BigDecimal values.
HTableDescriptor.DURABILITY instead.
RpcControllerFactory to help override
standard behavior of a PayloadCarryingRpcController.rpc delete(.BulkDeleteRequest) returns (.BulkDeleteResponse);
rpc delete(.BulkDeleteRequest) returns (.BulkDeleteResponse);
DELETE_COLUMN = 12;
DELETE_FAMILY = 14;
DELETE_FAMILY = 2;
DELETE_FAMILY_VERSION = 3;
DELETE_MULTIPLE_VERSIONS = 1;
DELETE_ONE_VERSION = 0;
DELETE = 8;
DELETE = 3;
rpc DeleteColumn(.DeleteColumnRequest) returns (.DeleteColumnResponse);
rpc DeleteColumn(.DeleteColumnRequest) returns (.DeleteColumnResponse);
deletes from the hbase:meta table.
Delete (HBase) from a TDelete (Thrift).
hbase:meta in ZooKeeper.
rpc DeleteNamespace(.DeleteNamespaceRequest) returns (.DeleteNamespaceResponse);
rpc DeleteNamespace(.DeleteNamespaceRequest) returns (.DeleteNamespaceResponse);
TDeletes (Thrift) into a list of Deletes (HBase).
rpc DeleteSnapshot(.DeleteSnapshotRequest) returns (.DeleteSnapshotResponse);
rpc DeleteSnapshot(.DeleteSnapshotRequest) returns (.DeleteSnapshotResponse);
rpc DeleteTable(.DeleteTableRequest) returns (.DeleteTableResponse);
rpc DeleteTable(.DeleteTableRequest) returns (.DeleteTableResponse);
CompareFilter
Constraint.
DISABLED = 0;
DISABLED = 1;
DISABLED = 1;
RegionSplitPolicy that disables region splits.rpc DisableTable(.DisableTableRequest) returns (.DisableTableResponse);
rpc DisableTable(.DisableTableRequest) returns (.DisableTableResponse);
DISABLING = 2;
rpc DispatchMergingRegions(.DispatchMergingRegionsRequest) returns (.DispatchMergingRegionsResponse);
rpc DispatchMergingRegions(.DispatchMergingRegionsRequest) returns (.DispatchMergingRegionsResponse);
DONE = 3;
BucketCache.Mutations
Note that the items must be sorted in order of increasing durabilityDurability setting for the table.
rpc EnableCatalogJanitor(.EnableCatalogJanitorRequest) returns (.EnableCatalogJanitorResponse);
rpc EnableCatalogJanitor(.EnableCatalogJanitorRequest) returns (.EnableCatalogJanitorResponse);
Constraint.
ENABLED = 0;
ENABLED = 0;
rpc EnableTable(.EnableTableRequest) returns (.EnableTableResponse);
rpc EnableTable(.EnableTableRequest) returns (.EnableTableResponse);
ENABLING = 3;
val into buffer dst.
val to dst.
val to buff.
val into dst, respecting voff and vlen.
val into buff, respecting offset and
length.
val into dst, respecting offset and
length.
val into buffer dst.
val into buffer dst.
val into buffer buff.
BlockType.ENCODED_DATA.
ENCODED_REGION_NAME = 2;
DataType operates.
byte[] will be.
val into buffer dst.
val into buffer dst.
val into buffer buff.
val into buffer buff.
val into buffer buff.
val into buffer dst.
val into buffer buff.
int16 value using the fixed-length encoding.
int32 value using the fixed-length encoding.
int64 value using the fixed-length encoding.
int8 value using the fixed-length encoding.
val into buffer dst.
val into buffer dst.
val into buffer buff.
val into buffer dst.
val into buffer buff.
TagType
.VISIBILITY_TAG_TYPE, that are part of the cell created from the WALEdits
that are prepared for replication while calling
ReplicationEndpoint
.replicate().
WrappedKeyWrappedKeySplitLogManager to timeout the task node.
IllegalStateException otherwise.
EQUAL = 2;
ERR = 4;
ForeignExceptionMessageForeignExceptionMessageGenericExceptionMessageGenericExceptionMessageStackTraceElementMessageStackTraceElementMessageCountRequestCountRequestCountResponseCountResponseRowCountServiceInterfaceAudience.Private or
InterfaceAudience.LimitedPrivate.EXEC = 2;
rpc ExecMasterService(.CoprocessorServiceRequest) returns (.CoprocessorServiceResponse);
rpc ExecMasterService(.CoprocessorServiceRequest) returns (.CoprocessorServiceResponse);
rpc ExecProcedure(.ExecProcedureRequest) returns (.ExecProcedureResponse);
rpc ExecProcedure(.ExecProcedureRequest) returns (.ExecProcedureResponse);
rpc ExecRegionServerService(.CoprocessorServiceRequest) returns (.CoprocessorServiceResponse);
rpc ExecRegionServerService(.CoprocessorServiceRequest) returns (.CoprocessorServiceResponse);
rpc ExecService(.CoprocessorServiceRequest) returns (.CoprocessorServiceResponse);
rpc ExecService(.CoprocessorServiceRequest) returns (.CoprocessorServiceResponse);
Service method using
the registered protocol handlers.
HConnectable.connect(org.apache.hadoop.hbase.client.HConnection)
implementation using a HConnection instance that lasts just for the
duration of the invocation.
Class.forName(String) which also returns classes for
primitives like boolean, etc.
FAILED_CLOSE = 10;
FAILED_OPEN = 9;
FAILED_OPEN = 1;
FAILED_OPENING = 2;
FAMILY = 1;
DiffKeyDeltaEncoder but supposedly faster.FavoredNodeLoadBalancer that has all the intelligence
for racks, meta scans, etc.LoadBalancer that assigns favored nodes for
each region.RpcScheduler} that serves incoming requests in order.Filter that represents an ordered List of Filters
which will be evaluated with a specified boolean operator FilterList.Operator.MUST_PASS_ALL
(AND) or FilterList.Operator.MUST_PASS_ONE (OR).Filters.
Filters.
Filters and an operator.
Filters and an operator.
ColumnCountGetFilterColumnCountGetFilterColumnPaginationFilterColumnPaginationFilterColumnPrefixFilterColumnPrefixFilterColumnRangeFilterColumnRangeFilterCompareFilterCompareFilterDependentColumnFilterDependentColumnFilterFamilyFilterFamilyFilterFilterFilterFilterAllFilterFilterAllFilterFilterListFilterListFilterList.OperatorFilterWrapperFilterWrapperFirstKeyOnlyFilterFirstKeyOnlyFilterFirstKeyValueMatchingQualifiersFilterFirstKeyValueMatchingQualifiersFilterFuzzyRowFilterFuzzyRowFilterInclusiveStopFilterInclusiveStopFilterKeyOnlyFilterKeyOnlyFilterMultipleColumnPrefixFilterMultipleColumnPrefixFilterPageFilterPageFilterPrefixFilterPrefixFilterQualifierFilterQualifierFilterRandomRowFilterRandomRowFilterRowFilterRowFilterSingleColumnValueExcludeFilterSingleColumnValueExcludeFilterSingleColumnValueFilterSingleColumnValueFilterSkipFilterSkipFilterTimestampsFilterTimestampsFilterValueFilterValueFilterWhileMatchFilterWhileMatchFilterFilter.filterKeyValue(Cell) calls.
FilterBase.filterKeyValue(Cell) can inherit this implementation that
never filters a row.
HConnectionManager.HConnectionImplementation.refCount is.
HFile has a fixed trailer which contains offsets to other
variable parts of the file.DataType implementation as a fixed-length
version of itself.wrapped.
FLUSH = 1;
Put operations.
Put operations.
rpc FlushRegion(.FlushRegionRequest) returns (.FlushRegionResponse);
rpc FlushRegion(.FlushRegionRequest) returns (.FlushRegionResponse);
Durability.SKIP_WAL and the data is imported to hbase, we
need to flush all the regions of the table as the data is held in memory and is also not
present in the Write Ahead Log to replay in scenarios of a crash.
HBaseVersionFileContentHBaseVersionFileContentReferenceReferenceReference.RangeTableDescriptors that reads descriptors from the
passed filesystem.PathFilter that only allows directories.PathFilter that returns usertable directories.FSYNC_WAL = 4;
hbase:meta, skipping regions from any
tables in the specified set of disabled tables.
hbase:meta, skipping regions from any
tables in the specified set of disabled tables.
hbase:meta.
hbase:meta.
hbase:meta table.
name.
BlockingRpcCallback.run(Object) or null if a null value was
passed.
rpc Get(.GetRequest) returns (.GetResponse);
rpc Get(.GetRequest) returns (.GetResponse);
index.
dst with bytes from the range, starting from index.
dst with bytes from the range, starting from index.
dst with bytes from the range, starting from position.
dst with bytes from the range, starting from the current
position.
repeated .Permission.Action action = 1;
repeated .Permission.Action action = 1;
repeated .Permission.Action action = 1;
repeated .Permission.Action action = 2;
repeated .Permission.Action action = 2;
repeated .Permission.Action action = 2;
repeated .Permission.Action action = 4;
repeated .Permission.Action action = 4;
repeated .Permission.Action action = 4;
repeated .Action action = 3;
repeated .Action action = 3;
repeated .Action action = 3;
repeated .Action action = 3;
repeated .Action action = 3;
repeated .Permission.Action action = 1;
repeated .Permission.Action action = 1;
repeated .Permission.Action action = 1;
repeated .Permission.Action action = 2;
repeated .Permission.Action action = 2;
repeated .Permission.Action action = 2;
repeated .Permission.Action action = 4;
repeated .Permission.Action action = 4;
repeated .Permission.Action action = 4;
repeated .Action action = 3;
repeated .Action action = 3;
repeated .Action action = 3;
repeated .Permission.Action action = 1;
repeated .Permission.Action action = 1;
repeated .Permission.Action action = 1;
repeated .Permission.Action action = 2;
repeated .Permission.Action action = 2;
repeated .Permission.Action action = 2;
repeated .Permission.Action action = 4;
repeated .Permission.Action action = 4;
repeated .Permission.Action action = 4;
repeated .Action action = 3;
repeated .Action action = 3;
repeated .Action action = 3;
repeated .Action action = 3;
repeated .Action action = 3;
repeated .Action action = 3;
repeated .Action action = 3;
repeated .Action action = 3;
repeated .Action action = 3;
required uint64 ageOfLastAppliedOp = 1;
required uint64 ageOfLastAppliedOp = 1;
required uint64 ageOfLastAppliedOp = 1;
required uint64 ageOfLastShippedOp = 2;
required uint64 ageOfLastShippedOp = 2;
required uint64 ageOfLastShippedOp = 2;
required string algorithm = 1;
required string algorithm = 1;
required string algorithm = 1;
required string algorithm = 1;
required string algorithm = 1;
required string algorithm = 1;
FileSystem
optional bool assign_seq_num = 3;
optional bool assign_seq_num = 3;
optional bool assign_seq_num = 3;
optional bool assign_seq_num = 2;
optional bool assign_seq_num = 2;
optional bool assign_seq_num = 2;
optional int32 associated_cell_count = 3;
optional int32 associated_cell_count = 3;
optional int32 associated_cell_count = 3;
optional int32 associated_cell_count = 8;
optional int32 associated_cell_count = 8;
optional int32 associated_cell_count = 8;
optional int32 associated_cell_count = 2;
optional int32 associated_cell_count = 2;
optional int32 associated_cell_count = 2;
optional bool atomic = 2;
optional bool atomic = 2;
optional bool atomic = 2;
repeated .NameBytesPair attribute = 3;
repeated .NameBytesPair attribute = 3;
repeated .NameBytesPair attribute = 3;
repeated .NameBytesPair attribute = 5;
repeated .NameBytesPair attribute = 5;
repeated .NameBytesPair attribute = 5;
repeated .NameBytesPair attribute = 2;
repeated .NameBytesPair attribute = 2;
repeated .NameBytesPair attribute = 2;
repeated .NameBytesPair attribute = 3;
repeated .NameBytesPair attribute = 5;
repeated .NameBytesPair attribute = 2;
repeated .NameBytesPair attribute = 3;
repeated .NameBytesPair attribute = 5;
repeated .NameBytesPair attribute = 2;
repeated .NameBytesPair attribute = 3;
repeated .NameBytesPair attribute = 3;
repeated .NameBytesPair attribute = 3;
repeated .NameBytesPair attribute = 5;
repeated .NameBytesPair attribute = 5;
repeated .NameBytesPair attribute = 5;
repeated .NameBytesPair attribute = 2;
repeated .NameBytesPair attribute = 2;
repeated .NameBytesPair attribute = 2;
repeated .NameBytesPair attribute = 3;
repeated .NameBytesPair attribute = 3;
repeated .NameBytesPair attribute = 3;
repeated .NameBytesPair attribute = 5;
repeated .NameBytesPair attribute = 5;
repeated .NameBytesPair attribute = 5;
repeated .NameBytesPair attribute = 2;
repeated .NameBytesPair attribute = 2;
repeated .NameBytesPair attribute = 2;
repeated .NameBytesPair attribute = 3;
repeated .NameBytesPair attribute = 3;
repeated .NameBytesPair attribute = 3;
repeated .NameBytesPair attribute = 5;
repeated .NameBytesPair attribute = 5;
repeated .NameBytesPair attribute = 5;
repeated .NameBytesPair attribute = 2;
repeated .NameBytesPair attribute = 2;
repeated .NameBytesPair attribute = 2;
repeated .NameBytesPair attribute = 3;
repeated .NameBytesPair attribute = 3;
repeated .NameBytesPair attribute = 3;
repeated .NameBytesPair attribute = 5;
repeated .NameBytesPair attribute = 5;
repeated .NameBytesPair attribute = 5;
repeated .NameBytesPair attribute = 2;
repeated .NameBytesPair attribute = 2;
repeated .NameBytesPair attribute = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
repeated bytes auth = 2;
repeated bytes auth = 2;
repeated bytes auth = 2;
repeated bytes auth = 2;
repeated bytes auth = 2;
repeated bytes auth = 2;
repeated uint32 auth = 2;
repeated uint32 auth = 2;
repeated uint32 auth = 2;
repeated bytes auth = 2;
repeated bytes auth = 2;
repeated bytes auth = 2;
repeated bytes auth = 2;
repeated bytes auth = 2;
repeated bytes auth = 2;
repeated uint32 auth = 2;
repeated uint32 auth = 2;
repeated uint32 auth = 2;
rpc GetAuthenticationToken(.GetAuthenticationTokenRequest) returns (.GetAuthenticationTokenResponse);
rpc GetAuthenticationToken(.GetAuthenticationTokenRequest) returns (.GetAuthenticationTokenResponse);
repeated bytes auth = 2;
repeated bytes auth = 2;
repeated bytes auth = 2;
repeated bytes auth = 2;
repeated bytes auth = 2;
repeated bytes auth = 2;
repeated uint32 auth = 2;
repeated uint32 auth = 2;
repeated uint32 auth = 2;
optional string auth_method = 2;
optional string auth_method = 2;
optional string auth_method = 2;
optional string auth_method = 2;
optional string auth_method = 2;
optional string auth_method = 2;
rpc getAuths(.GetAuthsRequest) returns (.GetAuthsResponse);
rpc getAuths(.GetAuthsRequest) returns (.GetAuthsResponse);
optional double averageLoad = 5;
optional double averageLoad = 5;
optional double averageLoad = 5;
rpc GetAvg(.AggregateRequest) returns (.AggregateResponse);
rpc GetAvg(.AggregateRequest) returns (.AggregateResponse);
repeated .ServerName backup_masters = 8;
repeated .ServerName backup_masters = 8;
repeated .ServerName backup_masters = 8;
repeated .ServerName backup_masters = 8;
repeated .ServerName backup_masters = 8;
repeated .ServerName backup_masters = 8;
repeated .ServerName backup_masters = 8;
repeated .ServerName backup_masters = 8;
repeated .ServerName backup_masters = 8;
repeated .ServerName backup_masters = 8;
repeated .ServerName backup_masters = 8;
repeated .ServerName backup_masters = 8;
repeated .ServerName backup_masters = 8;
repeated .ServerName backup_masters = 8;
repeated .ServerName backup_masters = 8;
repeated .ServerName backup_masters = 8;
repeated .ServerName backup_masters = 8;
optional bool balancer_on = 9;
optional bool balancer_on = 9;
optional bool balancer_on = 9;
optional bool balancer_on = 1;
optional bool balancer_on = 1;
optional bool balancer_on = 1;
required bool balancer_ran = 1;
required bool balancer_ran = 1;
required bool balancer_ran = 1;
optional int32 batch = 4;
optional int32 batch = 4;
optional int32 batch = 4;
optional uint32 batch_size = 9;
optional uint32 batch_size = 9;
optional uint32 batch_size = 9;
required bytes bigdecimal_msg = 1;
required bytes bigdecimal_msg = 1;
required bytes bigdecimal_msg = 1;
required .BitComparator.BitwiseOp bitwise_op = 2;
required .BitComparator.BitwiseOp bitwise_op = 2;
required .BitComparator.BitwiseOp bitwise_op = 2;
BlockType.ENCODED_DATA blocks from this file.
required string bulk_token = 1;
required string bulk_token = 1;
required string bulk_token = 1;
required string bulk_token = 1;
required string bulk_token = 1;
required string bulk_token = 1;
required string bulk_token = 4;
required string bulk_token = 4;
required string bulk_token = 4;
required string bulk_token = 1;
required string bulk_token = 1;
required string bulk_token = 1;
required string bulk_token = 1;
required string bulk_token = 1;
required string bulk_token = 1;
required string bulk_token = 4;
required string bulk_token = 4;
required string bulk_token = 4;
buf,
from the position (inclusive) to the limit (exclusive).
optional bool cache_blocks = 7 [default = true];
optional bool cache_blocks = 7 [default = true];
optional bool cache_blocks = 7 [default = true];
optional bool cache_blocks = 8 [default = true];
optional bool cache_blocks = 8 [default = true];
optional bool cache_blocks = 8 [default = true];
optional bool cacheBlocks = 11;
optional bool cacheBlocks = 11;
optional bool cacheBlocks = 11;
optional uint32 caching = 17;
optional uint32 caching = 17;
optional uint32 caching = 17;
optional int32 caching = 9;
optional int32 caching = 9;
optional int32 caching = 9;
required .CoprocessorServiceCall call = 2;
required .CoprocessorServiceCall call = 2;
required .CoprocessorServiceCall call = 2;
required .CoprocessorServiceCall call = 2;
optional uint32 call_id = 1;
optional uint32 call_id = 1;
optional uint32 call_id = 1;
optional uint32 call_id = 1;
optional uint32 call_id = 1;
optional uint32 call_id = 1;
required .CoprocessorServiceCall call = 2;
required .CoprocessorServiceCall call = 2;
required .CoprocessorServiceCall call = 2;
StoreFileManager.getCandidateFilesForRowKeyBefore(KeyValue)
for details on this methods.
repeated .Cell cell = 1;
repeated .Cell cell = 1;
repeated .Cell cell = 1;
optional string cell_block_codec_class = 3;
optional string cell_block_codec_class = 3;
optional string cell_block_codec_class = 3;
optional string cell_block_codec_class = 3;
optional string cell_block_codec_class = 3;
optional string cell_block_codec_class = 3;
optional string cell_block_compressor_class = 4;
optional string cell_block_compressor_class = 4;
optional string cell_block_compressor_class = 4;
optional string cell_block_compressor_class = 4;
optional string cell_block_compressor_class = 4;
optional string cell_block_compressor_class = 4;
optional .CellBlockMeta cell_block_meta = 5;
optional .CellBlockMeta cell_block_meta = 5;
optional .CellBlockMeta cell_block_meta = 5;
optional .CellBlockMeta cell_block_meta = 3;
optional .CellBlockMeta cell_block_meta = 3;
optional .CellBlockMeta cell_block_meta = 3;
optional .CellBlockMeta cell_block_meta = 5;
optional .CellBlockMeta cell_block_meta = 3;
optional .CellBlockMeta cell_block_meta = 5;
optional .CellBlockMeta cell_block_meta = 5;
optional .CellBlockMeta cell_block_meta = 5;
optional .CellBlockMeta cell_block_meta = 3;
optional .CellBlockMeta cell_block_meta = 3;
optional .CellBlockMeta cell_block_meta = 3;
repeated .Cell cell = 1;
repeated .Cell cell = 1;
optional string cell_codec_cls_name = 5;
optional string cell_codec_cls_name = 5;
optional string cell_codec_cls_name = 5;
optional string cell_codec_cls_name = 5;
optional string cell_codec_cls_name = 5;
optional string cell_codec_cls_name = 5;
repeated .Cell cell = 1;
repeated .Cell cell = 1;
repeated .Cell cell = 1;
repeated .Cell cell = 1;
repeated .Cell cell = 1;
repeated .Cell cell = 1;
repeated .Cell cell = 1;
repeated .Cell cell = 1;
repeated .Cell cell = 1;
repeated .Cell cell = 1;
repeated .Cell cell = 1;
repeated .Cell cell = 1;
repeated uint32 cells_per_result = 1;
repeated uint32 cells_per_result = 1;
repeated uint32 cells_per_result = 1;
repeated uint32 cells_per_result = 1;
repeated uint32 cells_per_result = 1;
repeated uint32 cells_per_result = 1;
repeated uint32 cells_per_result = 1;
repeated uint32 cells_per_result = 1;
repeated uint32 cells_per_result = 1;
optional .CellType cell_type = 5;
optional .CellType cell_type = 5;
optional .CellType cell_type = 5;
required float chance = 1;
required float chance = 1;
required float chance = 1;
required string charset = 3;
required string charset = 3;
required string charset = 3;
required string charset = 3;
required string charset = 3;
required string charset = 3;
optional string class_name = 1;
optional string class_name = 1;
optional string class_name = 1;
optional string class_name = 1;
optional string class_name = 1;
optional string class_name = 1;
required bool closed = 1;
required bool closed = 1;
required bool closed = 1;
optional bool close_scanner = 5;
optional bool close_scanner = 5;
optional bool close_scanner = 5;
optional bool closest_row_before = 11 [default = false];
optional bool closest_row_before = 11 [default = false];
optional bool closest_row_before = 11 [default = false];
required string cluster_id = 1;
required string cluster_id = 1;
required string cluster_id = 1;
optional .ClusterId cluster_id = 5;
optional .ClusterId cluster_id = 5;
optional .ClusterId cluster_id = 5;
optional .ClusterId cluster_id = 5;
required string cluster_id = 1;
required string cluster_id = 1;
required string cluster_id = 1;
optional .ClusterId cluster_id = 5;
optional .ClusterId cluster_id = 5;
optional .ClusterId cluster_id = 5;
repeated .UUID cluster_ids = 8;
repeated .UUID cluster_ids = 8;
repeated .UUID cluster_ids = 8;
repeated .UUID cluster_ids = 8;
repeated .UUID cluster_ids = 8;
repeated .UUID cluster_ids = 8;
repeated .UUID cluster_ids = 8;
repeated .UUID cluster_ids = 8;
repeated .UUID cluster_ids = 8;
repeated .UUID cluster_ids = 8;
repeated .UUID cluster_ids = 8;
repeated .UUID cluster_ids = 8;
repeated .UUID cluster_ids = 8;
repeated .UUID cluster_ids = 8;
repeated .UUID cluster_ids = 8;
repeated .UUID cluster_ids = 8;
repeated .UUID cluster_ids = 8;
required string clusterkey = 1;
required string clusterkey = 1;
required string clusterkey = 1;
required string clusterkey = 1;
required string clusterkey = 1;
required string clusterkey = 1;
required .ClusterStatus cluster_status = 1;
required .ClusterStatus cluster_status = 1;
required .ClusterStatus cluster_status = 1;
rpc GetClusterStatus(.GetClusterStatusRequest) returns (.GetClusterStatusResponse);
rpc GetClusterStatus(.GetClusterStatusRequest) returns (.GetClusterStatusResponse);
required .ClusterStatus cluster_status = 1;
required .ClusterStatus cluster_status = 1;
required .ClusterStatus cluster_status = 1;
required .ClusterStatus cluster_status = 1;
Result.getColumnCells(byte[], byte[]) instead.
repeated .Column column = 2;
repeated .Column column = 2;
repeated .Column column = 2;
repeated .Column column = 1;
repeated .Column column = 1;
repeated .Column column = 1;
optional bytes column = 2;
optional bytes column = 2;
optional bytes column = 2;
repeated .Column column = 2;
repeated .Column column = 1;
repeated .Column column = 2;
repeated .Column column = 1;
repeated .Column column = 2;
repeated .Column column = 2;
repeated .Column column = 2;
repeated .Column column = 1;
repeated .Column column = 1;
repeated .Column column = 1;
HColumnDescriptor of the column families
of the table.
repeated .ColumnFamilySchema column_families = 3;
repeated .ColumnFamilySchema column_families = 3;
repeated .ColumnFamilySchema column_families = 3;
required .ColumnFamilySchema column_families = 2;
required .ColumnFamilySchema column_families = 2;
required .ColumnFamilySchema column_families = 2;
required .ColumnFamilySchema column_families = 2;
required .ColumnFamilySchema column_families = 2;
required .ColumnFamilySchema column_families = 2;
repeated .ColumnFamilySchema column_families = 3;
required .ColumnFamilySchema column_families = 2;
required .ColumnFamilySchema column_families = 2;
repeated .ColumnFamilySchema column_families = 3;
repeated .ColumnFamilySchema column_families = 3;
repeated .ColumnFamilySchema column_families = 3;
repeated .ColumnFamilySchema column_families = 3;
repeated .ColumnFamilySchema column_families = 3;
repeated .ColumnFamilySchema column_families = 3;
repeated .ColumnFamilySchema column_families = 3;
repeated .ColumnFamilySchema column_families = 3;
repeated .ColumnFamilySchema column_families = 3;
repeated .ColumnFamilySchema column_families = 3;
required .ColumnFamilySchema column_families = 2;
required .ColumnFamilySchema column_families = 2;
required .ColumnFamilySchema column_families = 2;
required .ColumnFamilySchema column_families = 2;
required .ColumnFamilySchema column_families = 2;
required .ColumnFamilySchema column_families = 2;
repeated .ColumnFamilySchema column_families = 3;
repeated .ColumnFamilySchema column_families = 3;
repeated .ColumnFamilySchema column_families = 3;
optional bytes column_family = 2;
optional bytes column_family = 2;
optional bytes column_family = 2;
optional bytes column_family = 1;
optional bytes column_family = 1;
optional bytes column_family = 1;
Result.getColumnLatestCell(byte[], byte[]) instead.
Result.getColumnLatestCell(byte[], int, int, byte[], int, int) instead.
repeated .Column column = 2;
repeated .Column column = 2;
repeated .Column column = 2;
repeated .Column column = 1;
repeated .Column column = 1;
repeated .Column column = 1;
required bytes column_name = 2;
required bytes column_name = 2;
required bytes column_name = 2;
optional bytes column_offset = 3;
optional bytes column_offset = 3;
optional bytes column_offset = 3;
repeated .Column column = 2;
repeated .Column column = 2;
repeated .Column column = 2;
repeated .Column column = 1;
repeated .Column column = 1;
repeated .Column column = 1;
repeated .Column column = 2;
repeated .Column column = 2;
repeated .Column column = 2;
repeated .Column column = 1;
repeated .Column column = 1;
repeated .Column column = 1;
optional bytes column_qualifier = 3;
optional bytes column_qualifier = 3;
optional bytes column_qualifier = 3;
optional bytes column_qualifier = 2;
optional bytes column_qualifier = 2;
optional bytes column_qualifier = 2;
repeated bytes columns = 3;
repeated bytes columns = 3;
repeated bytes columns = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
repeated bytes columns = 3;
repeated bytes columns = 3;
repeated bytes columns = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
repeated bytes columns = 3;
repeated bytes columns = 3;
repeated bytes columns = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
repeated .MutationProto.ColumnValue column_value = 3;
repeated .MutationProto.ColumnValue column_value = 3;
repeated .MutationProto.ColumnValue column_value = 3;
repeated .MutationProto.ColumnValue column_value = 3;
repeated .MutationProto.ColumnValue column_value = 3;
repeated .MutationProto.ColumnValue column_value = 3;
repeated .MutationProto.ColumnValue column_value = 3;
repeated .MutationProto.ColumnValue column_value = 3;
repeated .MutationProto.ColumnValue column_value = 3;
repeated .MutationProto.ColumnValue column_value = 3;
repeated .MutationProto.ColumnValue column_value = 3;
repeated .MutationProto.ColumnValue column_value = 3;
repeated .MutationProto.ColumnValue column_value = 3;
repeated .MutationProto.ColumnValue column_value = 3;
repeated .MutationProto.ColumnValue column_value = 3;
repeated .MutationProto.ColumnValue column_value = 3;
repeated .MutationProto.ColumnValue column_value = 3;
repeated string compaction_input = 4;
repeated string compaction_input = 4;
repeated string compaction_input = 4;
repeated string compaction_input = 4;
repeated string compaction_input = 4;
repeated string compaction_input = 4;
repeated string compaction_input = 4;
repeated string compaction_input = 4;
repeated string compaction_input = 4;
repeated string compaction_input = 4;
repeated string compaction_input = 4;
repeated string compaction_input = 4;
repeated string compaction_output = 5;
repeated string compaction_output = 5;
repeated string compaction_output = 5;
repeated string compaction_output = 5;
repeated string compaction_output = 5;
repeated string compaction_output = 5;
repeated string compaction_output = 5;
repeated string compaction_output = 5;
repeated string compaction_output = 5;
repeated string compaction_output = 5;
repeated string compaction_output = 5;
repeated string compaction_output = 5;
optional bool compaction_state = 2;
optional bool compaction_state = 2;
optional bool compaction_state = 2;
optional .GetRegionInfoResponse.CompactionState compaction_state = 2;
optional .GetRegionInfoResponse.CompactionState compaction_state = 2;
optional .GetRegionInfoResponse.CompactionState compaction_state = 2;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .Comparator comparator = 5;
required .Comparator comparator = 5;
required .Comparator comparator = 5;
optional .Comparator comparator = 2;
optional .Comparator comparator = 2;
optional .Comparator comparator = 2;
required .Comparator comparator = 4;
required .Comparator comparator = 4;
required .Comparator comparator = 4;
required .Comparator comparator = 5;
optional .Comparator comparator = 2;
required .Comparator comparator = 4;
optional string comparator_class_name = 11;
optional string comparator_class_name = 11;
optional string comparator_class_name = 11;
optional string comparator_class_name = 11;
optional string comparator_class_name = 11;
optional string comparator_class_name = 11;
required .Comparator comparator = 5;
required .Comparator comparator = 5;
required .Comparator comparator = 5;
optional .Comparator comparator = 2;
optional .Comparator comparator = 2;
optional .Comparator comparator = 2;
required .Comparator comparator = 4;
required .Comparator comparator = 4;
required .Comparator comparator = 4;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareType compare_op = 1;
required .CompareType compare_op = 1;
required .CompareType compare_op = 1;
required .CompareType compare_op = 3;
required .CompareType compare_op = 3;
required .CompareType compare_op = 3;
required .CompareType compare_type = 4;
required .CompareType compare_type = 4;
required .CompareType compare_type = 4;
rpc GetCompletedSnapshots(.GetCompletedSnapshotsRequest) returns (.GetCompletedSnapshotsResponse);
rpc GetCompletedSnapshots(.GetCompletedSnapshotsRequest) returns (.GetCompletedSnapshotsResponse);
optional uint64 complete_sequence_id = 15;
optional uint64 complete_sequence_id = 15;
optional uint64 complete_sequence_id = 15;
optional string compression = 5;
optional string compression = 5;
optional string compression = 5;
optional string compression = 5;
optional string compression = 5;
optional string compression = 5;
optional uint32 compression_codec = 12;
optional uint32 compression_codec = 12;
optional uint32 compression_codec = 12;
optional .Condition condition = 3;
optional .Condition condition = 3;
optional .Condition condition = 3;
optional .Condition condition = 3;
optional .Condition condition = 3;
optional .Condition condition = 3;
optional .Condition condition = 3;
optional .Condition condition = 3;
optional .Condition condition = 3;
optional .Condition condition = 3;
optional .Condition condition = 3;
optional .Condition condition = 3;
optional .Condition condition = 3;
optional .Condition condition = 3;
Configuration object used by this instance.
Configuration object used by this instance.
HColumnDescriptor.configuration map.
HTableDescriptor.configuration map.
NamespaceDescriptor.configuration map.
repeated .NameStringPair configuration = 3;
repeated .NameStringPair configuration = 3;
repeated .NameStringPair configuration = 3;
repeated .NameStringPair configuration = 2;
repeated .NameStringPair configuration = 2;
repeated .NameStringPair configuration = 2;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 3;
repeated .NameStringPair configuration = 2;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 3;
repeated .NameStringPair configuration = 2;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 3;
repeated .NameStringPair configuration = 3;
repeated .NameStringPair configuration = 3;
repeated .NameStringPair configuration = 2;
repeated .NameStringPair configuration = 2;
repeated .NameStringPair configuration = 2;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 3;
repeated .NameStringPair configuration = 3;
repeated .NameStringPair configuration = 3;
repeated .NameStringPair configuration = 2;
repeated .NameStringPair configuration = 2;
repeated .NameStringPair configuration = 2;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 3;
repeated .NameStringPair configuration = 3;
repeated .NameStringPair configuration = 3;
repeated .NameStringPair configuration = 2;
repeated .NameStringPair configuration = 2;
repeated .NameStringPair configuration = 2;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 3;
repeated .NameStringPair configuration = 3;
repeated .NameStringPair configuration = 3;
repeated .NameStringPair configuration = 2;
repeated .NameStringPair configuration = 2;
repeated .NameStringPair configuration = 2;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
HConnection based on the environment in which we are running the
coprocessor.
repeated .Coprocessor coprocessors = 6;
repeated .Coprocessor coprocessors = 6;
repeated .Coprocessor coprocessors = 6;
repeated .Coprocessor coprocessors = 6;
repeated .Coprocessor coprocessors = 6;
repeated .Coprocessor coprocessors = 6;
repeated .Coprocessor coprocessors = 6;
repeated .Coprocessor coprocessors = 6;
repeated .Coprocessor coprocessors = 6;
repeated .Coprocessor coprocessors = 6;
repeated .Coprocessor coprocessors = 6;
repeated .Coprocessor coprocessors = 6;
repeated .Coprocessor coprocessors = 6;
repeated .Coprocessor coprocessors = 6;
repeated .Coprocessor coprocessors = 6;
repeated .Coprocessor coprocessors = 6;
repeated .Coprocessor coprocessors = 6;
required int64 count = 1 [default = 0];
required int64 count = 1 [default = 0];
required int64 count = 1 [default = 0];
required uint64 create_time = 3;
required uint64 create_time = 3;
required uint64 create_time = 3;
optional int64 create_time = 6;
optional int64 create_time = 6;
optional int64 create_time = 6;
optional int64 creation_time = 3 [default = 0];
optional int64 creation_time = 3 [default = 0];
optional int64 creation_time = 3 [default = 0];
optional int64 creation_time = 3 [default = 0];
optional int64 creation_time = 3 [default = 0];
optional int64 creation_time = 3 [default = 0];
User instance within current execution context.
optional uint64 current_compacted_KVs = 11;
optional uint64 current_compacted_KVs = 11;
optional uint64 current_compacted_KVs = 11;
optional int64 currentCompactedKVs = 13;
optional int64 currentCompactedKVs = 13;
optional int64 currentCompactedKVs = 13;
required bytes data = 3;
required bytes data = 3;
required bytes data = 3;
repeated .BytesBytesPair data = 3;
repeated .BytesBytesPair data = 3;
repeated .BytesBytesPair data = 3;
optional bytes data = 4;
optional bytes data = 4;
optional bytes data = 4;
repeated .BytesBytesPair data = 3;
repeated .BytesBytesPair data = 3;
repeated .BytesBytesPair data = 3;
repeated .BytesBytesPair data = 3;
repeated .BytesBytesPair data = 3;
optional uint32 data_index_count = 5;
optional uint32 data_index_count = 5;
optional uint32 data_index_count = 5;
repeated .BytesBytesPair data = 3;
repeated .BytesBytesPair data = 3;
repeated .BytesBytesPair data = 3;
optional float data_locality = 16;
optional float data_locality = 16;
optional float data_locality = 16;
repeated .BytesBytesPair data = 3;
repeated .BytesBytesPair data = 3;
repeated .BytesBytesPair data = 3;
repeated .BytesBytesPair data = 3;
repeated .BytesBytesPair data = 3;
repeated .BytesBytesPair data = 3;
required string date = 5;
required string date = 5;
required string date = 5;
required string date = 5;
required string date = 5;
required string date = 5;
repeated string deadNodes = 2;
repeated string deadNodes = 2;
repeated string deadNodes = 2;
repeated string deadNodes = 2;
repeated string deadNodes = 2;
repeated string deadNodes = 2;
repeated string deadNodes = 2;
repeated string deadNodes = 2;
repeated string deadNodes = 2;
repeated string deadNodes = 2;
repeated string deadNodes = 2;
repeated string deadNodes = 2;
repeated .ServerName dead_servers = 3;
repeated .ServerName dead_servers = 3;
repeated .ServerName dead_servers = 3;
repeated .ServerName dead_servers = 3;
repeated .ServerName dead_servers = 3;
repeated .ServerName dead_servers = 3;
repeated .ServerName dead_servers = 3;
repeated .ServerName dead_servers = 3;
repeated .ServerName dead_servers = 3;
repeated .ServerName dead_servers = 3;
repeated .ServerName dead_servers = 3;
repeated .ServerName dead_servers = 3;
repeated .ServerName dead_servers = 3;
repeated .ServerName dead_servers = 3;
repeated .ServerName dead_servers = 3;
repeated .ServerName dead_servers = 3;
repeated .ServerName dead_servers = 3;
optional string declaring_class = 1;
optional string declaring_class = 1;
optional string declaring_class = 1;
optional string declaring_class = 1;
optional string declaring_class = 1;
optional string declaring_class = 1;
InputStream.available()
InputStream.available()
LoadBalancer class.
EnvironmentEdge that is
being managed.
HFile version.
required .BulkDeleteRequest.DeleteType deleteType = 2;
required .BulkDeleteRequest.DeleteType deleteType = 2;
required .BulkDeleteRequest.DeleteType deleteType = 2;
optional .MutationProto.DeleteType delete_type = 4;
optional .MutationProto.DeleteType delete_type = 4;
optional .MutationProto.DeleteType delete_type = 4;
optional .ServerName destination_server = 4;
optional .ServerName destination_server = 4;
optional .ServerName destination_server = 4;
optional .ServerName destination_server = 4;
optional .ServerName destination_server = 4;
optional .ServerName destination_server = 4;
optional .ServerName destination_server = 4;
optional .ServerName dest_server_name = 2;
optional .ServerName dest_server_name = 2;
optional .ServerName dest_server_name = 2;
optional .ServerName dest_server_name = 2;
optional .ServerName dest_server_name = 2;
optional .ServerName dest_server_name = 2;
optional .ServerName dest_server_name = 2;
optional bool done = 1 [default = false];
optional bool done = 1 [default = false];
optional bool done = 1 [default = false];
optional bool done = 1 [default = false];
optional bool done = 1 [default = false];
optional bool done = 1 [default = false];
optional bool done = 1 [default = false];
optional bool done = 1 [default = false];
optional bool done = 1 [default = false];
optional bool do_not_retry = 5;
optional bool do_not_retry = 5;
optional bool do_not_retry = 5;
required double double_msg = 1;
required double double_msg = 1;
required double double_msg = 1;
optional bool drop_dependent_column = 4;
optional bool drop_dependent_column = 4;
optional bool drop_dependent_column = 4;
optional .MutationProto.Durability durability = 6 [default = USE_DEFAULT];
optional .MutationProto.Durability durability = 6 [default = USE_DEFAULT];
optional .MutationProto.Durability durability = 6 [default = USE_DEFAULT];
required string effective_user = 1;
required string effective_user = 1;
required string effective_user = 1;
required string effective_user = 1;
required string effective_user = 1;
required string effective_user = 1;
required bool enable = 1;
required bool enable = 1;
required bool enable = 1;
required bool enabled = 1;
required bool enabled = 1;
required bool enabled = 1;
required bytes encoded_region_name = 2;
required bytes encoded_region_name = 2;
required bytes encoded_region_name = 2;
required bytes encoded_region_name = 1;
required bytes encoded_region_name = 1;
required bytes encoded_region_name = 1;
optional bytes encryption_key = 13;
optional bytes encryption_key = 13;
optional bytes encryption_key = 13;
optional bytes encryption_key = 2;
optional bytes encryption_key = 2;
optional bytes encryption_key = 2;
optional bytes end_key = 4;
optional bytes end_key = 4;
optional bytes end_key = 4;
optional bytes endKey = 3;
optional bytes endKey = 3;
optional bytes endKey = 3;
optional bytes endRow = 2;
optional bytes endRow = 2;
optional bytes endRow = 2;
optional int64 endTime = 6;
optional int64 endTime = 6;
optional int64 endTime = 6;
optional string engine = 4;
optional string engine = 4;
optional string engine = 4;
optional string engine = 4;
optional string engine = 4;
optional string engine = 4;
repeated .WALEntry entry = 1;
repeated .WALEntry entry = 1;
repeated .WALEntry entry = 1;
repeated .WALEntry entry = 1;
repeated .WALEntry entry = 1;
repeated .WALEntry entry = 1;
repeated .WALEntry entry = 1;
repeated .WALEntry entry = 1;
optional uint64 entry_count = 7;
optional uint64 entry_count = 7;
optional uint64 entry_count = 7;
repeated .WALEntry entry = 1;
repeated .WALEntry entry = 1;
repeated .WALEntry entry = 1;
repeated .WALEntry entry = 1;
repeated .WALEntry entry = 1;
repeated .WALEntry entry = 1;
repeated .WALEntry entry = 1;
repeated .WALEntry entry = 1;
repeated .WALEntry entry = 1;
optional bytes error_info = 3;
optional bytes error_info = 3;
optional bytes error_info = 3;
optional string error_message = 1;
optional string error_message = 1;
optional string error_message = 1;
required string error_message = 2;
required string error_message = 2;
required string error_message = 2;
optional string error_message = 1;
optional string error_message = 1;
optional string error_message = 1;
required string error_message = 2;
required string error_message = 2;
required string error_message = 2;
required uint32 event_type_code = 1;
required uint32 event_type_code = 1;
required uint32 event_type_code = 1;
optional .NameBytesPair exception = 2;
optional .NameBytesPair exception = 2;
optional .NameBytesPair exception = 2;
optional .NameBytesPair exception = 3;
optional .NameBytesPair exception = 3;
optional .NameBytesPair exception = 3;
optional .ExceptionResponse exception = 2;
optional .ExceptionResponse exception = 2;
optional .ExceptionResponse exception = 2;
optional .NameBytesPair exception = 2;
optional .NameBytesPair exception = 3;
optional .ExceptionResponse exception = 2;
optional string exception_class_name = 1;
optional string exception_class_name = 1;
optional string exception_class_name = 1;
optional string exception_class_name = 1;
optional string exception_class_name = 1;
optional string exception_class_name = 1;
optional .NameBytesPair exception = 2;
optional .NameBytesPair exception = 2;
optional .NameBytesPair exception = 2;
optional .NameBytesPair exception = 3;
optional .NameBytesPair exception = 3;
optional .NameBytesPair exception = 3;
optional .ExceptionResponse exception = 2;
optional .ExceptionResponse exception = 2;
optional .ExceptionResponse exception = 2;
optional bool existence_only = 10 [default = false];
optional bool existence_only = 10 [default = false];
optional bool existence_only = 10 [default = false];
optional bool exists = 3;
optional bool exists = 3;
optional bool exists = 3;
required int64 expected_timeout = 1;
required int64 expected_timeout = 1;
required int64 expected_timeout = 1;
required int64 expected_timeout = 1;
required int64 expected_timeout = 1;
required int64 expected_timeout = 1;
required int64 expiration_date = 2;
required int64 expiration_date = 2;
required int64 expiration_date = 2;
optional int64 expiration_date = 5;
optional int64 expiration_date = 5;
optional int64 expiration_date = 5;
required string expression = 1;
required string expression = 1;
required string expression = 1;
required string expression = 1;
required string expression = 1;
required string expression = 1;
null if no exception
was thrown.
HColumnDescriptor
of all the column families of the table.
CellUtil.cloneFamily(Cell)
optional bytes family = 2;
optional bytes family = 2;
optional bytes family = 2;
optional bytes family = 3;
optional bytes family = 3;
optional bytes family = 3;
repeated bytes family = 2;
repeated bytes family = 2;
repeated bytes family = 2;
optional bytes family = 2;
optional bytes family = 2;
optional bytes family = 2;
required bytes family = 2;
required bytes family = 2;
required bytes family = 2;
required bytes family = 1;
required bytes family = 1;
required bytes family = 1;
required bytes family = 1;
required bytes family = 1;
required bytes family = 1;
required bytes family = 2;
required bytes family = 2;
required bytes family = 2;
required bytes family = 1;
required bytes family = 1;
required bytes family = 1;
required bytes family = 1;
required bytes family = 1;
required bytes family = 1;
repeated bytes family = 2;
repeated bytes family = 2;
repeated bytes family = 2;
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
repeated bytes family = 2;
repeated bytes family = 2;
repeated bytes family = 2;
Mutation.getFamilyCellMap() instead.
required bytes family_name = 1;
required bytes family_name = 1;
required bytes family_name = 1;
required bytes family_name = 3;
required bytes family_name = 3;
required bytes family_name = 3;
required bytes family_name = 1;
required bytes family_name = 1;
required bytes family_name = 1;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
repeated .ServerName favored_node = 1;
repeated .ServerName favored_node = 1;
repeated .ServerName favored_node = 1;
repeated .ServerName favored_node = 1;
repeated .ServerName favored_node = 1;
repeated .ServerName favored_node = 1;
repeated .ServerName favored_node = 1;
repeated .ServerName favored_node = 1;
repeated .ServerName favored_node = 1;
repeated .ServerName favored_node = 1;
repeated .ServerName favored_node = 1;
repeated .ServerName favored_node = 1;
repeated .ServerName favored_node = 1;
repeated .ServerName favored_node = 1;
repeated .ServerName favored_node = 1;
repeated .ServerName favored_node = 1;
repeated .ServerName favored_node = 1;
repeated .ServerName favored_nodes = 3;
repeated .ServerName favored_nodes = 3;
repeated .ServerName favored_nodes = 3;
repeated .ServerName favored_nodes = 2;
repeated .ServerName favored_nodes = 2;
repeated .ServerName favored_nodes = 2;
repeated .ServerName favored_nodes = 3;
repeated .ServerName favored_nodes = 2;
repeated .ServerName favored_nodes = 3;
repeated .ServerName favored_nodes = 2;
repeated .ServerName favored_nodes = 3;
repeated .ServerName favored_nodes = 3;
repeated .ServerName favored_nodes = 3;
repeated .ServerName favored_nodes = 2;
repeated .ServerName favored_nodes = 2;
repeated .ServerName favored_nodes = 2;
repeated .ServerName favored_nodes = 3;
repeated .ServerName favored_nodes = 3;
repeated .ServerName favored_nodes = 3;
repeated .ServerName favored_nodes = 2;
repeated .ServerName favored_nodes = 2;
repeated .ServerName favored_nodes = 2;
repeated .ServerName favored_nodes = 3;
repeated .ServerName favored_nodes = 3;
repeated .ServerName favored_nodes = 3;
repeated .ServerName favored_nodes = 2;
repeated .ServerName favored_nodes = 2;
repeated .ServerName favored_nodes = 2;
repeated .ServerName favored_nodes = 3;
repeated .ServerName favored_nodes = 3;
repeated .ServerName favored_nodes = 3;
repeated .ServerName favored_nodes = 2;
repeated .ServerName favored_nodes = 2;
repeated .ServerName favored_nodes = 2;
optional uint64 file_info_offset = 1;
optional uint64 file_info_offset = 1;
optional uint64 file_info_offset = 1;
optional string file_name = 3;
optional string file_name = 3;
optional string file_name = 3;
optional string file_name = 3;
optional string file_name = 3;
optional string file_name = 3;
optional uint64 file_size = 3;
optional uint64 file_size = 3;
optional uint64 file_size = 3;
optional .Filter filter = 4;
optional .Filter filter = 4;
optional .Filter filter = 4;
optional .Filter filter = 5;
optional .Filter filter = 5;
optional .Filter filter = 5;
required .Filter filter = 1;
required .Filter filter = 1;
required .Filter filter = 1;
required .Filter filter = 1;
required .Filter filter = 1;
required .Filter filter = 1;
required .Filter filter = 1;
required .Filter filter = 1;
required .Filter filter = 1;
optional string filter = 8;
optional string filter = 8;
optional string filter = 8;
optional .Filter filter = 4;
optional .Filter filter = 5;
required .Filter filter = 1;
required .Filter filter = 1;
required .Filter filter = 1;
optional string filter = 8;
optional string filter = 8;
optional string filter = 8;
optional bool filter_if_missing = 5;
optional bool filter_if_missing = 5;
optional bool filter_if_missing = 5;
optional .Filter filter = 4;
optional .Filter filter = 4;
optional .Filter filter = 4;
optional .Filter filter = 5;
optional .Filter filter = 5;
optional .Filter filter = 5;
required .Filter filter = 1;
required .Filter filter = 1;
required .Filter filter = 1;
required .Filter filter = 1;
required .Filter filter = 1;
required .Filter filter = 1;
required .Filter filter = 1;
required .Filter filter = 1;
required .Filter filter = 1;
repeated .Filter filters = 2;
repeated .Filter filters = 2;
repeated .Filter filters = 2;
repeated .Filter filters = 2;
repeated .Filter filters = 2;
repeated .Filter filters = 2;
repeated .Filter filters = 2;
repeated .Filter filters = 2;
repeated .Filter filters = 2;
repeated .Filter filters = 2;
repeated .Filter filters = 2;
repeated .Filter filters = 2;
repeated .Filter filters = 2;
repeated .Filter filters = 2;
repeated .Filter filters = 2;
repeated .Filter filters = 2;
repeated .Filter filters = 2;
required bytes first = 1;
required bytes first = 1;
required bytes first = 1;
optional uint64 first_data_block_offset = 9;
optional uint64 first_data_block_offset = 9;
optional uint64 first_data_block_offset = 9;
repeated bytes first_part = 1;
repeated bytes first_part = 1;
repeated bytes first_part = 1;
repeated bytes first_part = 1;
repeated bytes first_part = 1;
repeated bytes first_part = 1;
repeated bytes first_part = 1;
repeated bytes first_part = 1;
repeated bytes first_part = 1;
HFile version 1: move this to StoreFile after Ryan's
patch goes in to eliminate KeyValue here.
optional bool flushed = 2;
optional bool flushed = 2;
optional bool flushed = 2;
optional uint32 following_kv_count = 7;
optional uint32 following_kv_count = 7;
optional uint32 following_kv_count = 7;
optional bool force = 2 [default = false];
optional bool force = 2 [default = false];
optional bool force = 2 [default = false];
optional bool forcible = 3 [default = false];
optional bool forcible = 3 [default = false];
optional bool forcible = 3 [default = false];
optional bool forcible = 3 [default = false];
optional bool forcible = 3 [default = false];
optional bool forcible = 3 [default = false];
optional uint64 from = 1;
optional uint64 from = 1;
optional uint64 from = 1;
Get (HBase) from a TGet (Thrift).
required .DelegationToken fs_token = 3;
required .DelegationToken fs_token = 3;
required .DelegationToken fs_token = 3;
required .DelegationToken fs_token = 3;
required .DelegationToken fs_token = 3;
required .DelegationToken fs_token = 3;
required .DelegationToken fs_token = 3;
repeated .BytesBytesPair fuzzy_keys_data = 1;
repeated .BytesBytesPair fuzzy_keys_data = 1;
repeated .BytesBytesPair fuzzy_keys_data = 1;
repeated .BytesBytesPair fuzzy_keys_data = 1;
repeated .BytesBytesPair fuzzy_keys_data = 1;
repeated .BytesBytesPair fuzzy_keys_data = 1;
repeated .BytesBytesPair fuzzy_keys_data = 1;
repeated .BytesBytesPair fuzzy_keys_data = 1;
repeated .BytesBytesPair fuzzy_keys_data = 1;
repeated .BytesBytesPair fuzzy_keys_data = 1;
repeated .BytesBytesPair fuzzy_keys_data = 1;
repeated .BytesBytesPair fuzzy_keys_data = 1;
repeated .BytesBytesPair fuzzy_keys_data = 1;
repeated .BytesBytesPair fuzzy_keys_data = 1;
repeated .BytesBytesPair fuzzy_keys_data = 1;
repeated .BytesBytesPair fuzzy_keys_data = 1;
repeated .BytesBytesPair fuzzy_keys_data = 1;
HFile version.
optional .GenericExceptionMessage generic_exception = 2;
optional .GenericExceptionMessage generic_exception = 2;
optional .GenericExceptionMessage generic_exception = 2;
optional .GenericExceptionMessage generic_exception = 2;
optional .GenericExceptionMessage generic_exception = 2;
optional .GenericExceptionMessage generic_exception = 2;
optional .GenericExceptionMessage generic_exception = 2;
optional .Get get = 3;
optional .Get get = 3;
optional .Get get = 3;
required .Get get = 2;
required .Get get = 2;
required .Get get = 2;
optional .Get get = 3;
required .Get get = 2;
optional .Get get = 3;
optional .Get get = 3;
optional .Get get = 3;
required .Get get = 2;
required .Get get = 2;
required .Get get = 2;
optional .GlobalPermission global_permission = 2;
optional .GlobalPermission global_permission = 2;
optional .GlobalPermission global_permission = 2;
optional .GlobalPermission global_permission = 2;
optional .GlobalPermission global_permission = 2;
optional .GlobalPermission global_permission = 2;
optional .GlobalPermission global_permission = 2;
optional bool has_compression = 1;
optional bool has_compression = 1;
optional bool has_compression = 1;
optional bytes hash = 5;
optional bytes hash = 5;
optional bytes hash = 5;
optional bool has_tag_compression = 3;
optional bool has_tag_compression = 3;
optional bool has_tag_compression = 3;
optional .HBaseVersionFileContent hbase_version = 1;
optional .HBaseVersionFileContent hbase_version = 1;
optional .HBaseVersionFileContent hbase_version = 1;
optional .HBaseVersionFileContent hbase_version = 1;
optional .HBaseVersionFileContent hbase_version = 1;
optional .HBaseVersionFileContent hbase_version = 1;
optional .HBaseVersionFileContent hbase_version = 1;
optional int32 heapOccupancy = 2 [default = 0];
optional int32 heapOccupancy = 2 [default = 0];
optional int32 heapOccupancy = 2 [default = 0];
optional int32 heapSizeMB = 4;
optional int32 heapSizeMB = 4;
optional int32 heapSizeMB = 4;
optional string hfile = 3;
optional string hfile = 3;
optional string hfile = 3;
optional string hfile = 3;
optional string hfile = 3;
optional string hfile = 3;
required string host_name = 1;
required string host_name = 1;
required string host_name = 1;
optional string hostname = 3;
optional string hostname = 3;
optional string hostname = 3;
required string host_name = 1;
required string host_name = 1;
required string host_name = 1;
optional string hostname = 3;
optional string hostname = 3;
optional string hostname = 3;
HConstants.CATALOG_FAMILY and
qualifier of the catalog table result.
Result.
HTable.
HTable.
Enum.ordinal().
required int32 id = 1;
required int32 id = 1;
required int32 id = 1;
optional int64 id = 4;
optional int64 id = 4;
optional int64 id = 4;
optional bytes identifier = 1;
optional bytes identifier = 1;
optional bytes identifier = 1;
optional bytes identifier = 1;
optional bytes identifier = 1;
optional bytes identifier = 1;
optional uint64 if_older_than_ts = 2;
optional uint64 if_older_than_ts = 2;
optional uint64 if_older_than_ts = 2;
optional uint32 index = 1;
optional uint32 index = 1;
optional uint32 index = 1;
optional uint32 index = 1;
optional uint32 index = 1;
optional uint32 index = 1;
optional int32 infoPort = 1;
optional int32 infoPort = 1;
optional int32 infoPort = 1;
optional uint32 info_server_port = 9;
optional uint32 info_server_port = 9;
optional uint32 info_server_port = 9;
optional bool inMemory = 4;
optional bool inMemory = 4;
optional bool inMemory = 4;
optional string instance = 2;
optional string instance = 2;
optional string instance = 2;
optional string instance = 2;
optional string instance = 2;
optional string instance = 2;
name property as an int, possibly
referring to the deprecated name of the configuration property.
required string interpreter_class_name = 1;
required string interpreter_class_name = 1;
required string interpreter_class_name = 1;
required string interpreter_class_name = 1;
required string interpreter_class_name = 1;
required string interpreter_class_name = 1;
optional bytes interpreter_specific_bytes = 3;
optional bytes interpreter_specific_bytes = 3;
optional bytes interpreter_specific_bytes = 3;
required bool is_master_running = 1;
required bool is_master_running = 1;
required bool is_master_running = 1;
optional bool isRecovering = 3;
optional bool isRecovering = 3;
optional bool isRecovering = 3;
optional bool is_shared = 4;
optional bool is_shared = 4;
optional bool is_shared = 4;
optional int64 issue_date = 4;
optional int64 issue_date = 4;
optional int64 issue_date = 4;
optional bytes iv = 4;
optional bytes iv = 4;
optional bytes iv = 4;
optional string jerseyVersion = 5;
optional string jerseyVersion = 5;
optional string jerseyVersion = 5;
optional string jerseyVersion = 5;
optional string jerseyVersion = 5;
optional string jerseyVersion = 5;
optional string jvmVersion = 2;
optional string jvmVersion = 2;
optional string jvmVersion = 2;
optional string jvmVersion = 2;
optional string jvmVersion = 2;
optional string jvmVersion = 2;
required .WALKey key = 1;
required .WALKey key = 1;
required .WALKey key = 1;
required bytes key = 3;
required bytes key = 3;
required bytes key = 3;
required bytes key = 1;
required bytes key = 1;
required bytes key = 1;
required .WALKey key = 1;
KeyValue instance with the provided
characteristics would take up in its underlying data structure for the key.
required int32 key_id = 3;
required int32 key_id = 3;
required int32 key_id = 3;
required .WALKey key = 1;
required .WALKey key = 1;
required .WALKey key = 1;
optional .CellType key_type = 5;
optional .CellType key_type = 5;
optional .CellType key_type = 5;
repeated bytes key_value_bytes = 2;
repeated bytes key_value_bytes = 2;
repeated bytes key_value_bytes = 2;
repeated bytes key_value_bytes = 2;
repeated bytes key_value_bytes = 2;
repeated bytes key_value_bytes = 2;
repeated bytes key_value_bytes = 2;
repeated bytes key_value_bytes = 2;
repeated bytes key_value_bytes = 2;
rpc getKeyValueCount(.CountRequest) returns (.CountResponse);
rpc getKeyValueCount(.CountRequest) returns (.CountResponse);
KeyValue instance with the provided
characteristics would take up for its underlying data structure.
KeyValue instance with the provided
characteristics would take up for its underlying data structure.
KeyValue instance with the provided
characteristics would take up for its underlying data structure.
WALEdit.getCells() instead
required .TokenIdentifier.Kind kind = 1;
required .TokenIdentifier.Kind kind = 1;
required .TokenIdentifier.Kind kind = 1;
optional string kind = 3;
optional string kind = 3;
optional string kind = 3;
optional string kind = 3;
optional string kind = 3;
optional string kind = 3;
repeated string label = 1;
repeated string label = 1;
repeated string label = 1;
repeated bytes label = 1;
repeated bytes label = 1;
repeated bytes label = 1;
required bytes label = 1;
required bytes label = 1;
required bytes label = 1;
repeated string label = 1;
repeated string label = 1;
repeated string label = 1;
repeated string label = 1;
repeated string label = 1;
repeated string label = 1;
repeated bytes label = 1;
repeated bytes label = 1;
repeated bytes label = 1;
repeated string label = 1;
repeated string label = 1;
repeated string label = 1;
repeated bytes label = 1;
repeated bytes label = 1;
repeated bytes label = 1;
repeated string labels = 10;
repeated string labels = 10;
repeated string labels = 10;
repeated string labels = 10;
repeated string labels = 10;
repeated string labels = 10;
repeated string labels = 10;
repeated string labels = 10;
repeated string labels = 10;
repeated string labels = 10;
repeated string labels = 10;
repeated string labels = 10;
optional uint64 last_data_block_offset = 10;
optional uint64 last_data_block_offset = 10;
optional uint64 last_data_block_offset = 10;
required uint64 last_flushed_sequence_id = 1;
required uint64 last_flushed_sequence_id = 1;
required uint64 last_flushed_sequence_id = 1;
rpc GetLastFlushedSequenceId(.GetLastFlushedSequenceIdRequest) returns (.GetLastFlushedSequenceIdResponse);
rpc GetLastFlushedSequenceId(.GetLastFlushedSequenceIdRequest) returns (.GetLastFlushedSequenceIdResponse);
required uint64 last_flushed_sequence_id = 1;
required uint64 last_flushed_sequence_id = 1;
required uint64 last_flushed_sequence_id = 1;
required uint64 last_flush_time = 1;
required uint64 last_flush_time = 1;
required uint64 last_flush_time = 1;
HFile version 1: move this to StoreFile after
Ryan's patch goes in to eliminate KeyValue here.
optional bool latest_version_only = 6;
optional bool latest_version_only = 6;
optional bool latest_version_only = 6;
required uint64 least_sig_bits = 1;
required uint64 least_sig_bits = 1;
required uint64 least_sig_bits = 1;
required bool len_as_val = 1;
required bool len_as_val = 1;
required bool len_as_val = 1;
required uint32 length = 2;
required uint32 length = 2;
required uint32 length = 2;
optional uint32 length = 1;
optional uint32 length = 1;
optional uint32 length = 1;
required int32 limit = 1;
required int32 limit = 1;
required int32 limit = 1;
required int32 limit = 1;
required int32 limit = 1;
required int32 limit = 1;
optional int32 line_number = 4;
optional int32 line_number = 4;
optional int32 line_number = 4;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
repeated .LiveServerInfo live_servers = 2;
repeated .LiveServerInfo live_servers = 2;
repeated .LiveServerInfo live_servers = 2;
repeated .LiveServerInfo live_servers = 2;
repeated .LiveServerInfo live_servers = 2;
repeated .LiveServerInfo live_servers = 2;
repeated .LiveServerInfo live_servers = 2;
repeated .LiveServerInfo live_servers = 2;
repeated .LiveServerInfo live_servers = 2;
repeated .LiveServerInfo live_servers = 2;
repeated .LiveServerInfo live_servers = 2;
repeated .LiveServerInfo live_servers = 2;
repeated .LiveServerInfo live_servers = 2;
repeated .LiveServerInfo live_servers = 2;
repeated .LiveServerInfo live_servers = 2;
repeated .LiveServerInfo live_servers = 2;
repeated .LiveServerInfo live_servers = 2;
optional .ServerLoad load = 2;
optional .ServerLoad load = 2;
optional .ServerLoad load = 2;
LoadBalancer class name.
optional .ServerLoad load = 2;
optional bool load_column_families_on_demand = 13;
optional bool load_column_families_on_demand = 13;
optional bool load_column_families_on_demand = 13;
required bool loaded = 1;
required bool loaded = 1;
required bool loaded = 1;
required bool loaded = 1;
required bool loaded = 1;
required bool loaded = 1;
BlockCacheUtil.CachedBlocksByFile instance and load it up by iterating content in
BlockCache.
optional uint64 load_on_open_data_offset = 2;
optional uint64 load_on_open_data_offset = 2;
optional uint64 load_on_open_data_offset = 2;
optional .ServerLoad load = 2;
optional .ServerLoad load = 2;
optional .ServerLoad load = 2;
optional .RegionLoadStats loadStats = 5;
optional .RegionLoadStats loadStats = 5;
optional .RegionLoadStats loadStats = 5;
optional .RegionLoadStats loadStats = 5;
optional .RegionLoadStats loadStats = 5;
optional .RegionLoadStats loadStats = 5;
optional .RegionLoadStats loadStats = 5;
optional string location = 5;
optional string location = 5;
optional string location = 5;
optional string location = 5;
optional string location = 5;
optional string location = 5;
repeated string locations = 2;
repeated string locations = 2;
repeated string locations = 2;
repeated string locations = 2;
repeated string locations = 2;
repeated string locations = 2;
repeated string locations = 2;
repeated string locations = 2;
repeated string locations = 2;
repeated string locations = 2;
repeated string locations = 2;
repeated string locations = 2;
required string lock_owner = 1;
required string lock_owner = 1;
required string lock_owner = 1;
optional .ServerName lock_owner = 2;
optional .ServerName lock_owner = 2;
optional .ServerName lock_owner = 2;
optional .ServerName lock_owner = 2;
required string lock_owner = 1;
required string lock_owner = 1;
required string lock_owner = 1;
optional .ServerName lock_owner = 2;
optional .ServerName lock_owner = 2;
optional .ServerName lock_owner = 2;
required uint64 log_sequence_number = 3;
required uint64 log_sequence_number = 3;
required uint64 log_sequence_number = 3;
required int64 long_msg = 1;
required int64 long_msg = 1;
required int64 long_msg = 1;
optional bool major = 2;
optional bool major = 2;
optional bool major = 2;
repeated .NameStringPair map_entries = 1;
repeated .NameStringPair map_entries = 1;
repeated .NameStringPair map_entries = 1;
repeated .NameStringPair map_entries = 1;
repeated .NameStringPair map_entries = 1;
repeated .NameStringPair map_entries = 1;
repeated .NameStringPair map_entries = 1;
repeated .NameStringPair map_entries = 1;
repeated .NameStringPair map_entries = 1;
repeated .NameStringPair map_entries = 1;
repeated .NameStringPair map_entries = 1;
repeated .NameStringPair map_entries = 1;
repeated .NameStringPair map_entries = 1;
repeated .NameStringPair map_entries = 1;
repeated .NameStringPair map_entries = 1;
repeated .NameStringPair map_entries = 1;
repeated .NameStringPair map_entries = 1;
repeated .BytesBytesPair map_entry = 1;
repeated .BytesBytesPair map_entry = 1;
repeated .BytesBytesPair map_entry = 1;
repeated .BytesBytesPair map_entry = 1;
repeated .BytesBytesPair map_entry = 1;
repeated .BytesBytesPair map_entry = 1;
repeated .BytesBytesPair map_entry = 1;
repeated .BytesBytesPair map_entry = 1;
repeated .BytesBytesPair map_entry = 1;
repeated .BytesBytesPair map_entry = 1;
repeated .BytesBytesPair map_entry = 1;
repeated .BytesBytesPair map_entry = 1;
repeated .BytesBytesPair map_entry = 1;
repeated .BytesBytesPair map_entry = 1;
repeated .BytesBytesPair map_entry = 1;
repeated .BytesBytesPair map_entry = 1;
repeated .BytesBytesPair map_entry = 1;
MasterKeepAliveConnection to the active master
ServerName.
optional .ServerName master = 7;
optional .ServerName master = 7;
optional .ServerName master = 7;
required .ServerName master = 1;
required .ServerName master = 1;
required .ServerName master = 1;
optional .ServerName master = 7;
required .ServerName master = 1;
repeated .Coprocessor master_coprocessors = 6;
repeated .Coprocessor master_coprocessors = 6;
repeated .Coprocessor master_coprocessors = 6;
repeated .Coprocessor master_coprocessors = 6;
repeated .Coprocessor master_coprocessors = 6;
repeated .Coprocessor master_coprocessors = 6;
repeated .Coprocessor master_coprocessors = 6;
repeated .Coprocessor master_coprocessors = 6;
repeated .Coprocessor master_coprocessors = 6;
repeated .Coprocessor master_coprocessors = 6;
repeated .Coprocessor master_coprocessors = 6;
repeated .Coprocessor master_coprocessors = 6;
repeated .Coprocessor master_coprocessors = 6;
repeated .Coprocessor master_coprocessors = 6;
repeated .Coprocessor master_coprocessors = 6;
repeated .Coprocessor master_coprocessors = 6;
repeated .Coprocessor master_coprocessors = 6;
optional .ServerName master = 7;
optional .ServerName master = 7;
optional .ServerName master = 7;
required .ServerName master = 1;
required .ServerName master = 1;
required .ServerName master = 1;
rpc GetMax(.AggregateRequest) returns (.AggregateResponse);
rpc GetMax(.AggregateRequest) returns (.AggregateResponse);
optional bytes max_column = 3;
optional bytes max_column = 3;
optional bytes max_column = 3;
optional bool max_column_inclusive = 4;
optional bool max_column_inclusive = 4;
optional bool max_column_inclusive = 4;
optional uint32 max_heap_MB = 4;
optional uint32 max_heap_MB = 4;
optional uint32 max_heap_MB = 4;
optional int32 maxHeapSizeMB = 5;
optional int32 maxHeapSizeMB = 5;
optional int32 maxHeapSizeMB = 5;
optional uint64 max_result_size = 10;
optional uint64 max_result_size = 10;
optional uint64 max_result_size = 10;
optional uint32 max_versions = 6 [default = 1];
optional uint32 max_versions = 6 [default = 1];
optional uint32 max_versions = 6 [default = 1];
optional uint32 max_versions = 7 [default = 1];
optional uint32 max_versions = 7 [default = 1];
optional uint32 max_versions = 7 [default = 1];
optional int32 maxVersions = 4;
optional int32 maxVersions = 4;
optional int32 maxVersions = 4;
optional int32 maxVersions = 7;
optional int32 maxVersions = 7;
optional int32 maxVersions = 7;
rpc GetMedian(.AggregateRequest) returns (.AggregateResponse);
rpc GetMedian(.AggregateRequest) returns (.AggregateResponse);
optional int32 memstoreLoad = 1 [default = 0];
optional int32 memstoreLoad = 1 [default = 0];
optional int32 memstoreLoad = 1 [default = 0];
optional uint32 memstore_size_MB = 6;
optional uint32 memstore_size_MB = 6;
optional uint32 memstore_size_MB = 6;
optional int32 memstoreSizeMB = 5;
optional int32 memstoreSizeMB = 5;
optional int32 memstoreSizeMB = 5;
optional string message = 2;
optional string message = 2;
optional string message = 2;
optional string message = 2;
optional string message = 2;
optional string message = 2;
optional uint32 meta_index_count = 6;
optional uint32 meta_index_count = 6;
optional uint32 meta_index_count = 6;
hbase:meta or null if location is
not currently available.
required string method_name = 3;
required string method_name = 3;
required string method_name = 3;
optional string method_name = 2;
optional string method_name = 2;
optional string method_name = 2;
optional string method_name = 3;
optional string method_name = 3;
optional string method_name = 3;
required string method_name = 3;
required string method_name = 3;
required string method_name = 3;
optional string method_name = 2;
optional string method_name = 2;
optional string method_name = 2;
optional string method_name = 3;
optional string method_name = 3;
optional string method_name = 3;
repeated .NameInt64Pair metrics = 1;
repeated .NameInt64Pair metrics = 1;
repeated .NameInt64Pair metrics = 1;
repeated .NameInt64Pair metrics = 1;
repeated .NameInt64Pair metrics = 1;
repeated .NameInt64Pair metrics = 1;
repeated .NameInt64Pair metrics = 1;
repeated .NameInt64Pair metrics = 1;
repeated .NameInt64Pair metrics = 1;
repeated .NameInt64Pair metrics = 1;
repeated .NameInt64Pair metrics = 1;
repeated .NameInt64Pair metrics = 1;
repeated .NameInt64Pair metrics = 1;
repeated .NameInt64Pair metrics = 1;
repeated .NameInt64Pair metrics = 1;
repeated .NameInt64Pair metrics = 1;
repeated .NameInt64Pair metrics = 1;
rpc GetMin(.AggregateRequest) returns (.AggregateResponse);
rpc GetMin(.AggregateRequest) returns (.AggregateResponse);
optional bytes min_column = 1;
optional bytes min_column = 1;
optional bytes min_column = 1;
optional bool min_column_inclusive = 2;
optional bool min_column_inclusive = 2;
optional bool min_column_inclusive = 2;
optional .SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN];
optional .SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN];
optional .SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN];
optional bool more_results = 3;
optional bool more_results = 3;
optional bool more_results = 3;
optional bool more_results_in_region = 8;
optional bool more_results_in_region = 8;
optional bool more_results_in_region = 8;
required uint64 most_sig_bits = 2;
required uint64 most_sig_bits = 2;
required uint64 most_sig_bits = 2;
optional .MutationProto.MutationType mutate_type = 2;
optional .MutationProto.MutationType mutate_type = 2;
optional .MutationProto.MutationType mutate_type = 2;
optional .MutationProto mutation = 2;
optional .MutationProto mutation = 2;
optional .MutationProto mutation = 2;
required .MutationProto mutation = 2;
required .MutationProto mutation = 2;
required .MutationProto mutation = 2;
optional .MutationProto mutation = 2;
required .MutationProto mutation = 2;
optional .MutationProto mutation = 2;
optional .MutationProto mutation = 2;
optional .MutationProto mutation = 2;
required .MutationProto mutation = 2;
required .MutationProto mutation = 2;
required .MutationProto mutation = 2;
repeated .MutationProto mutation_request = 1;
repeated .MutationProto mutation_request = 1;
repeated .MutationProto mutation_request = 1;
repeated .MutationProto mutation_request = 1;
repeated .MutationProto mutation_request = 1;
repeated .MutationProto mutation_request = 1;
repeated .MutationProto mutation_request = 1;
repeated .MutationProto mutation_request = 1;
repeated .MutationProto mutation_request = 1;
repeated .MutationProto mutation_request = 1;
repeated .MutationProto mutation_request = 1;
repeated .MutationProto mutation_request = 1;
repeated .MutationProto mutation_request = 1;
repeated .MutationProto mutation_request = 1;
repeated .MutationProto mutation_request = 1;
repeated .MutationProto mutation_request = 1;
repeated .MutationProto mutation_request = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required bytes name = 1;
required bytes name = 1;
required bytes name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
optional string name = 1;
optional string name = 1;
optional string name = 1;
required bytes name = 1;
required bytes name = 1;
required bytes name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
optional string name = 1;
optional string name = 1;
optional string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required bytes name = 1;
required bytes name = 1;
required bytes name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
repeated string name = 1;
repeated string name = 1;
repeated string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
optional string name = 1;
optional string name = 1;
optional string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
optional string name = 1;
optional string name = 1;
optional string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
optional string name = 1;
optional string name = 1;
optional string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
repeated string name = 1;
repeated string name = 1;
repeated string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
optional string name = 1;
optional string name = 1;
optional string name = 1;
repeated string name = 1;
repeated string name = 1;
repeated string name = 1;
ThreadFactory that names each created thread uniquely,
with a common prefix.
repeated string name = 1;
repeated string name = 1;
repeated string name = 1;
required bytes namespace = 1;
required bytes namespace = 1;
required bytes namespace = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
repeated .NamespaceDescriptor namespaceDescriptor = 1;
repeated .NamespaceDescriptor namespaceDescriptor = 1;
repeated .NamespaceDescriptor namespaceDescriptor = 1;
rpc GetNamespaceDescriptor(.GetNamespaceDescriptorRequest) returns (.GetNamespaceDescriptorResponse);
rpc GetNamespaceDescriptor(.GetNamespaceDescriptorRequest) returns (.GetNamespaceDescriptorResponse);
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
repeated .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
repeated .NamespaceDescriptor namespaceDescriptor = 1;
repeated .NamespaceDescriptor namespaceDescriptor = 1;
repeated .NamespaceDescriptor namespaceDescriptor = 1;
repeated .NamespaceDescriptor namespaceDescriptor = 1;
repeated .NamespaceDescriptor namespaceDescriptor = 1;
repeated .NamespaceDescriptor namespaceDescriptor = 1;
repeated .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
repeated .NamespaceDescriptor namespaceDescriptor = 1;
repeated .NamespaceDescriptor namespaceDescriptor = 1;
repeated .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
repeated .NamespaceDescriptor namespaceDescriptor = 1;
repeated .NamespaceDescriptor namespaceDescriptor = 1;
repeated .NamespaceDescriptor namespaceDescriptor = 1;
Path object representing
the namespace directory under path rootdir
optional bytes namespace_name = 3;
optional bytes namespace_name = 3;
optional bytes namespace_name = 3;
optional bytes namespace_name = 1;
optional bytes namespace_name = 1;
optional bytes namespace_name = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
optional .NamespacePermission namespace_permission = 3;
optional .NamespacePermission namespace_permission = 3;
optional .NamespacePermission namespace_permission = 3;
optional .NamespacePermission namespace_permission = 3;
optional .NamespacePermission namespace_permission = 3;
optional .NamespacePermission namespace_permission = 3;
optional .NamespacePermission namespace_permission = 3;
optional uint64 next_call_seq = 6;
optional uint64 next_call_seq = 6;
optional uint64 next_call_seq = 6;
optional uint64 nonce = 9;
optional uint64 nonce = 9;
optional uint64 nonce = 9;
optional uint64 nonce = 3;
optional uint64 nonce = 3;
optional uint64 nonce = 3;
optional uint64 nonce = 5;
optional uint64 nonce = 5;
optional uint64 nonce = 5;
optional uint64 nonce = 10;
optional uint64 nonce = 10;
optional uint64 nonce = 10;
optional uint64 nonceGroup = 2;
optional uint64 nonceGroup = 2;
optional uint64 nonceGroup = 2;
optional uint64 nonce_group = 4;
optional uint64 nonce_group = 4;
optional uint64 nonce_group = 4;
optional uint64 nonce_group = 2;
optional uint64 nonce_group = 2;
optional uint64 nonce_group = 2;
optional uint64 nonce_group = 4;
optional uint64 nonce_group = 4;
optional uint64 nonce_group = 4;
optional uint64 nonceGroup = 9;
optional uint64 nonceGroup = 9;
optional uint64 nonceGroup = 9;
optional uint32 number_of_requests = 1;
optional uint32 number_of_requests = 1;
optional uint32 number_of_requests = 1;
optional uint32 number_of_rows = 4;
optional uint32 number_of_rows = 4;
optional uint32 number_of_rows = 4;
optional uint32 num_data_index_levels = 8;
optional uint32 num_data_index_levels = 8;
optional uint32 num_data_index_levels = 8;
optional bool offline = 5;
optional bool offline = 5;
optional bool offline = 5;
optional int32 offset = 2;
optional int32 offset = 2;
optional int32 offset = 2;
required bool on = 1;
required bool on = 1;
required bool on = 1;
rpc GetOnlineRegion(.GetOnlineRegionRequest) returns (.GetOnlineRegionResponse);
rpc GetOnlineRegion(.GetOnlineRegionRequest) returns (.GetOnlineRegionResponse);
optional bool openForDistributedLogReplay = 4;
optional bool openForDistributedLogReplay = 4;
optional bool openForDistributedLogReplay = 4;
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
repeated .OpenRegionResponse.RegionOpeningState opening_state = 1;
repeated .OpenRegionResponse.RegionOpeningState opening_state = 1;
repeated .OpenRegionResponse.RegionOpeningState opening_state = 1;
repeated .OpenRegionResponse.RegionOpeningState opening_state = 1;
repeated .OpenRegionResponse.RegionOpeningState opening_state = 1;
repeated .OpenRegionResponse.RegionOpeningState opening_state = 1;
repeated .OpenRegionResponse.RegionOpeningState opening_state = 1;
repeated .OpenRegionResponse.RegionOpeningState opening_state = 1;
repeated .OpenRegionResponse.RegionOpeningState opening_state = 1;
optional uint64 open_seq_num = 3;
optional uint64 open_seq_num = 3;
optional uint64 open_seq_num = 3;
required .FilterList.Operator operator = 1;
required .FilterList.Operator operator = 1;
required .FilterList.Operator operator = 1;
Order imposed by this data type, or null when
natural ordering is not preserved.
optional uint32 ordinal = 2;
optional uint32 ordinal = 2;
optional uint32 ordinal = 2;
optional string osVersion = 3;
optional string osVersion = 3;
optional string osVersion = 3;
optional string osVersion = 3;
optional string osVersion = 3;
optional string osVersion = 3;
required int64 page_size = 1;
required int64 page_size = 1;
required int64 page_size = 1;
optional int64 parent_id = 2;
optional int64 parent_id = 2;
optional int64 parent_id = 2;
optional bytes password = 2;
optional bytes password = 2;
optional bytes password = 2;
optional bytes password = 2;
optional bytes password = 2;
optional bytes password = 2;
required string path = 2;
required string path = 2;
required string path = 2;
required string path = 2;
required string path = 2;
required string path = 2;
required string pattern = 1;
required string pattern = 1;
required string pattern = 1;
required string pattern = 1;
required string pattern = 1;
required string pattern = 1;
required int32 pattern_flags = 2;
required int32 pattern_flags = 2;
required int32 pattern_flags = 2;
optional bytes payload = 5;
optional bytes payload = 5;
optional bytes payload = 5;
required string peerID = 1;
required string peerID = 1;
required string peerID = 1;
required string peerID = 1;
required string peerID = 1;
required string peerID = 1;
repeated .Permission permission = 1;
repeated .Permission permission = 1;
repeated .Permission permission = 1;
required .Permission permission = 3;
required .Permission permission = 3;
required .Permission permission = 3;
repeated .Permission permission = 1;
required .Permission permission = 3;
repeated .Permission permission = 1;
repeated .Permission permission = 1;
repeated .Permission permission = 1;
repeated .Permission permission = 1;
repeated .Permission permission = 1;
repeated .Permission permission = 1;
repeated .Permission permission = 1;
repeated .Permission permission = 1;
repeated .Permission permission = 1;
repeated .Permission permission = 1;
required .Permission permission = 3;
required .Permission permission = 3;
required .Permission permission = 3;
repeated .Permission permission = 1;
repeated .Permission permission = 1;
repeated .Permission permission = 1;
repeated .Permission permissions = 2;
repeated .Permission permissions = 2;
repeated .Permission permissions = 2;
repeated .Permission permissions = 2;
repeated .Permission permissions = 2;
repeated .Permission permissions = 2;
repeated .Permission permissions = 2;
repeated .Permission permissions = 2;
repeated .Permission permissions = 2;
repeated .Permission permissions = 2;
repeated .Permission permissions = 2;
repeated .Permission permissions = 2;
repeated .Permission permissions = 2;
repeated .Permission permissions = 2;
repeated .Permission permissions = 2;
repeated .Permission permissions = 2;
repeated .Permission permissions = 2;
PoolMap.PoolType.RoundRobin.
PoolMap.PoolType.RoundRobin or PoolMap.PoolType.ThreadLocal,
otherwise default to the former.
optional uint32 port = 2;
optional uint32 port = 2;
optional uint32 port = 2;
required uint32 port = 1;
required uint32 port = 1;
required uint32 port = 1;
optional int32 port = 4;
optional int32 port = 4;
optional int32 port = 4;
required int64 position = 1;
required int64 position = 1;
required int64 position = 1;
position marker.
required bytes prefix = 1;
required bytes prefix = 1;
required bytes prefix = 1;
optional bytes prefix = 1;
optional bytes prefix = 1;
optional bytes prefix = 1;
optional bool preserveSplits = 2 [default = false];
optional bool preserveSplits = 2 [default = false];
optional bool preserveSplits = 2 [default = false];
optional bool prev_balance_value = 1;
optional bool prev_balance_value = 1;
optional bool prev_balance_value = 1;
optional bool prev_value = 1;
optional bool prev_value = 1;
optional bool prev_value = 1;
optional uint32 priority = 6;
optional uint32 priority = 6;
optional uint32 priority = 6;
required .ProcedureDescription procedure = 1;
required .ProcedureDescription procedure = 1;
required .ProcedureDescription procedure = 1;
optional .ProcedureDescription procedure = 1;
optional .ProcedureDescription procedure = 1;
optional .ProcedureDescription procedure = 1;
required .ProcedureDescription procedure = 1;
optional .ProcedureDescription procedure = 1;
required .ProcedureDescription procedure = 1;
required .ProcedureDescription procedure = 1;
required .ProcedureDescription procedure = 1;
optional .ProcedureDescription procedure = 1;
optional .ProcedureDescription procedure = 1;
optional .ProcedureDescription procedure = 1;
optional bool processed = 2;
optional bool processed = 2;
optional bool processed = 2;
optional bool processed = 2;
optional bool processed = 2;
optional bool processed = 2;
optional string purpose = 5;
optional string purpose = 5;
optional string purpose = 5;
optional string purpose = 5;
optional string purpose = 5;
optional string purpose = 5;
CellUtil.cloneQualifier(Cell)
optional bytes qualifier = 3;
optional bytes qualifier = 3;
optional bytes qualifier = 3;
optional bytes qualifier = 3;
optional bytes qualifier = 3;
optional bytes qualifier = 3;
required bytes qualifier = 3;
required bytes qualifier = 3;
required bytes qualifier = 3;
repeated bytes qualifier = 2;
repeated bytes qualifier = 2;
repeated bytes qualifier = 2;
required bytes qualifier = 3;
required bytes qualifier = 3;
required bytes qualifier = 3;
optional bytes qualifier = 1;
optional bytes qualifier = 1;
optional bytes qualifier = 1;
required bytes qualifier = 2;
required bytes qualifier = 2;
required bytes qualifier = 2;
repeated bytes qualifier = 2;
repeated bytes qualifier = 2;
repeated bytes qualifier = 2;
repeated bytes qualifier = 2;
repeated bytes qualifier = 2;
repeated bytes qualifier = 2;
repeated bytes qualifiers = 1;
repeated bytes qualifiers = 1;
repeated bytes qualifiers = 1;
repeated bytes qualifiers = 1;
repeated bytes qualifiers = 1;
repeated bytes qualifiers = 1;
repeated bytes qualifiers = 1;
repeated bytes qualifiers = 1;
repeated bytes qualifiers = 1;
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
required .Reference.Range range = 2;
required .Reference.Range range = 2;
required .Reference.Range range = 2;
HLog.Reader for reading logs to split.
HLog.Reader for reading logs to split.
optional bool readOnly = 5;
optional bool readOnly = 5;
optional bool readOnly = 5;
optional uint64 read_requests_count = 8;
optional uint64 read_requests_count = 8;
optional uint64 read_requests_count = 8;
optional int64 readRequestsCount = 7;
optional int64 readRequestsCount = 7;
optional int64 readRequestsCount = 7;
optional string real_user = 2;
optional string real_user = 2;
optional string real_user = 2;
optional string real_user = 2;
optional string real_user = 2;
optional string real_user = 2;
required string reason = 1;
required string reason = 1;
required string reason = 1;
required string reason = 1;
required string reason = 1;
required string reason = 1;
optional .Reference reference = 2;
optional .Reference reference = 2;
optional .Reference reference = 2;
optional .Reference reference = 2;
FileStatus of the file referenced by this StoreFileInfo
optional .Reference reference = 2;
optional .Reference reference = 2;
optional .Reference reference = 2;
optional string regex = 1;
optional string regex = 1;
optional string regex = 1;
optional string regex = 1;
optional string regex = 1;
optional string regex = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionInfo region = 1;
required .RegionInfo region = 1;
required .RegionInfo region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionInfo region = 1;
required .RegionInfo region = 1;
required .RegionInfo region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
optional .RegionSpecifier region = 1;
optional .RegionSpecifier region = 1;
optional .RegionSpecifier region = 1;
optional .RegionInfo region = 4;
optional .RegionInfo region = 4;
optional .RegionInfo region = 4;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region_a = 1;
required .RegionSpecifier region_a = 1;
required .RegionSpecifier region_a = 1;
required .RegionSpecifier region_a = 1;
required .RegionSpecifier region_a = 1;
required .RegionSpecifier region_a = 1;
required .RegionSpecifier region_a = 1;
required .RegionSpecifier region_a = 1;
repeated .RegionAction regionAction = 1;
repeated .RegionAction regionAction = 1;
repeated .RegionAction regionAction = 1;
repeated .RegionAction regionAction = 1;
repeated .RegionAction regionAction = 1;
repeated .RegionAction regionAction = 1;
repeated .RegionAction regionAction = 1;
repeated .RegionAction regionAction = 1;
repeated .RegionAction regionAction = 1;
repeated .RegionAction regionAction = 1;
repeated .RegionAction regionAction = 1;
repeated .RegionAction regionAction = 1;
repeated .RegionAction regionAction = 1;
repeated .RegionAction regionAction = 1;
repeated .RegionAction regionAction = 1;
repeated .RegionAction regionAction = 1;
repeated .RegionAction regionAction = 1;
repeated .RegionActionResult regionActionResult = 1;
repeated .RegionActionResult regionActionResult = 1;
repeated .RegionActionResult regionActionResult = 1;
repeated .RegionActionResult regionActionResult = 1;
repeated .RegionActionResult regionActionResult = 1;
repeated .RegionActionResult regionActionResult = 1;
repeated .RegionActionResult regionActionResult = 1;
repeated .RegionActionResult regionActionResult = 1;
repeated .RegionActionResult regionActionResult = 1;
repeated .RegionActionResult regionActionResult = 1;
repeated .RegionActionResult regionActionResult = 1;
repeated .RegionActionResult regionActionResult = 1;
repeated .RegionActionResult regionActionResult = 1;
repeated .RegionActionResult regionActionResult = 1;
repeated .RegionActionResult regionActionResult = 1;
repeated .RegionActionResult regionActionResult = 1;
repeated .RegionActionResult regionActionResult = 1;
required .RegionSpecifier region_a = 1;
required .RegionSpecifier region_a = 1;
required .RegionSpecifier region_a = 1;
required .RegionSpecifier region_a = 1;
required .RegionSpecifier region_a = 1;
required .RegionSpecifier region_a = 1;
required .RegionSpecifier region_b = 2;
required .RegionSpecifier region_b = 2;
required .RegionSpecifier region_b = 2;
required .RegionSpecifier region_b = 2;
required .RegionSpecifier region_b = 2;
required .RegionSpecifier region_b = 2;
required .RegionSpecifier region_b = 2;
required .RegionSpecifier region_b = 2;
required .RegionSpecifier region_b = 2;
required .RegionSpecifier region_b = 2;
required .RegionSpecifier region_b = 2;
required .RegionSpecifier region_b = 2;
required .RegionSpecifier region_b = 2;
required .RegionSpecifier region_b = 2;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionInfo region = 1;
required .RegionSpecifier region = 1;
required .RegionInfo region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
optional .RegionSpecifier region = 1;
optional .RegionInfo region = 4;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
hbase:meta for passed table.
required uint64 region_id = 1;
required uint64 region_id = 1;
required uint64 region_id = 1;
rpc GetRegionInfo(.GetRegionInfoRequest) returns (.GetRegionInfoResponse);
rpc GetRegionInfo(.GetRegionInfoRequest) returns (.GetRegionInfoResponse);
repeated .RegionInfo region_info = 1;
repeated .RegionInfo region_info = 1;
repeated .RegionInfo region_info = 1;
required .RegionInfo region_info = 1;
required .RegionInfo region_info = 1;
required .RegionInfo region_info = 1;
required .RegionInfo region_info = 1;
required .RegionInfo region_info = 1;
required .RegionInfo region_info = 1;
repeated .RegionInfo region_info = 2;
repeated .RegionInfo region_info = 2;
repeated .RegionInfo region_info = 2;
required .RegionInfo region_info = 2;
required .RegionInfo region_info = 2;
required .RegionInfo region_info = 2;
repeated .RegionInfo region_info = 1;
required .RegionInfo region_info = 1;
required .RegionInfo region_info = 1;
repeated .RegionInfo region_info = 2;
required .RegionInfo region_info = 2;
repeated .RegionInfo region_info = 1;
repeated .RegionInfo region_info = 2;
repeated .RegionInfo region_info = 1;
repeated .RegionInfo region_info = 1;
repeated .RegionInfo region_info = 1;
repeated .RegionInfo region_info = 2;
repeated .RegionInfo region_info = 2;
repeated .RegionInfo region_info = 2;
repeated .RegionInfo region_info = 1;
repeated .RegionInfo region_info = 1;
repeated .RegionInfo region_info = 1;
repeated .RegionInfo region_info = 2;
repeated .RegionInfo region_info = 2;
repeated .RegionInfo region_info = 2;
repeated .RegionInfo region_info = 1;
repeated .RegionInfo region_info = 1;
repeated .RegionInfo region_info = 1;
required .RegionInfo region_info = 1;
required .RegionInfo region_info = 1;
required .RegionInfo region_info = 1;
required .RegionInfo region_info = 1;
required .RegionInfo region_info = 1;
required .RegionInfo region_info = 1;
repeated .RegionInfo region_info = 2;
repeated .RegionInfo region_info = 2;
repeated .RegionInfo region_info = 2;
required .RegionInfo region_info = 2;
required .RegionInfo region_info = 2;
required .RegionInfo region_info = 2;
repeated .RegionInfo region_info = 1;
repeated .RegionInfo region_info = 1;
repeated .RegionInfo region_info = 1;
repeated .RegionInfo region_info = 2;
repeated .RegionInfo region_info = 2;
repeated .RegionInfo region_info = 2;
repeated .RegionLoad region_loads = 5;
repeated .RegionLoad region_loads = 5;
repeated .RegionLoad region_loads = 5;
repeated .RegionLoad region_loads = 5;
repeated .RegionLoad region_loads = 5;
repeated .RegionLoad region_loads = 5;
repeated .RegionLoad region_loads = 5;
repeated .RegionLoad region_loads = 5;
repeated .RegionLoad region_loads = 5;
repeated .RegionLoad region_loads = 5;
repeated .RegionLoad region_loads = 5;
repeated .RegionLoad region_loads = 5;
repeated .RegionLoad region_loads = 5;
repeated .RegionLoad region_loads = 5;
repeated .RegionLoad region_loads = 5;
repeated .RegionLoad region_loads = 5;
repeated .RegionLoad region_loads = 5;
repeated .SnapshotRegionManifest region_manifests = 2;
repeated .SnapshotRegionManifest region_manifests = 2;
repeated .SnapshotRegionManifest region_manifests = 2;
repeated .SnapshotRegionManifest region_manifests = 2;
repeated .SnapshotRegionManifest region_manifests = 2;
repeated .SnapshotRegionManifest region_manifests = 2;
repeated .SnapshotRegionManifest region_manifests = 2;
repeated .SnapshotRegionManifest region_manifests = 2;
repeated .SnapshotRegionManifest region_manifests = 2;
repeated .SnapshotRegionManifest region_manifests = 2;
repeated .SnapshotRegionManifest region_manifests = 2;
repeated .SnapshotRegionManifest region_manifests = 2;
repeated .SnapshotRegionManifest region_manifests = 2;
repeated .SnapshotRegionManifest region_manifests = 2;
repeated .SnapshotRegionManifest region_manifests = 2;
repeated .SnapshotRegionManifest region_manifests = 2;
repeated .SnapshotRegionManifest region_manifests = 2;
required bytes region_name = 1;
required bytes region_name = 1;
required bytes region_name = 1;
optional bytes region_name = 7;
optional bytes region_name = 7;
optional bytes region_name = 7;
required bytes region_name = 2;
required bytes region_name = 2;
required bytes region_name = 2;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionInfo region = 1;
required .RegionInfo region = 1;
required .RegionInfo region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionInfo region = 1;
required .RegionInfo region = 1;
required .RegionInfo region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
optional .RegionSpecifier region = 1;
optional .RegionSpecifier region = 1;
optional .RegionSpecifier region = 1;
optional .RegionInfo region = 4;
optional .RegionInfo region = 4;
optional .RegionInfo region = 4;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
optional int32 regions = 3;
optional int32 regions = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
optional int32 regions = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
repeated .RegionInTransition regions_in_transition = 4;
repeated .RegionInTransition regions_in_transition = 4;
repeated .RegionInTransition regions_in_transition = 4;
repeated .RegionInTransition regions_in_transition = 4;
repeated .RegionInTransition regions_in_transition = 4;
repeated .RegionInTransition regions_in_transition = 4;
repeated .RegionInTransition regions_in_transition = 4;
repeated .RegionInTransition regions_in_transition = 4;
repeated .RegionInTransition regions_in_transition = 4;
repeated .RegionInTransition regions_in_transition = 4;
repeated .RegionInTransition regions_in_transition = 4;
repeated .RegionInTransition regions_in_transition = 4;
repeated .RegionInTransition regions_in_transition = 4;
repeated .RegionInTransition regions_in_transition = 4;
repeated .RegionInTransition regions_in_transition = 4;
repeated .RegionInTransition regions_in_transition = 4;
repeated .RegionInTransition regions_in_transition = 4;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
required .RegionSpecifier region_specifier = 1;
required .RegionSpecifier region_specifier = 1;
required .RegionSpecifier region_specifier = 1;
required .RegionSpecifier region_specifier = 1;
required .RegionSpecifier region_specifier = 1;
required .RegionSpecifier region_specifier = 1;
required .RegionSpecifier region_specifier = 1;
required .RegionState region_state = 2;
required .RegionState region_state = 2;
required .RegionState region_state = 2;
required .RegionState region_state = 2;
required .RegionState region_state = 2;
required .RegionState region_state = 2;
required .RegionState region_state = 2;
repeated bytes region_to_flush = 1;
repeated bytes region_to_flush = 1;
repeated bytes region_to_flush = 1;
repeated bytes region_to_flush = 1;
repeated bytes region_to_flush = 1;
repeated bytes region_to_flush = 1;
repeated bytes region_to_flush = 1;
repeated bytes region_to_flush = 1;
repeated bytes region_to_flush = 1;
optional string replicationEndpointImpl = 2;
optional string replicationEndpointImpl = 2;
optional string replicationEndpointImpl = 2;
optional string replicationEndpointImpl = 2;
optional string replicationEndpointImpl = 2;
optional string replicationEndpointImpl = 2;
required uint64 replicationLag = 5;
required uint64 replicationLag = 5;
required uint64 replicationLag = 5;
optional .ReplicationLoadSink replLoadSink = 11;
optional .ReplicationLoadSink replLoadSink = 11;
optional .ReplicationLoadSink replLoadSink = 11;
optional .ReplicationLoadSink replLoadSink = 11;
optional .ReplicationLoadSink replLoadSink = 11;
optional .ReplicationLoadSink replLoadSink = 11;
optional .ReplicationLoadSink replLoadSink = 11;
repeated .ReplicationLoadSource replLoadSource = 10;
repeated .ReplicationLoadSource replLoadSource = 10;
repeated .ReplicationLoadSource replLoadSource = 10;
repeated .ReplicationLoadSource replLoadSource = 10;
repeated .ReplicationLoadSource replLoadSource = 10;
repeated .ReplicationLoadSource replLoadSource = 10;
repeated .ReplicationLoadSource replLoadSource = 10;
repeated .ReplicationLoadSource replLoadSource = 10;
repeated .ReplicationLoadSource replLoadSource = 10;
repeated .ReplicationLoadSource replLoadSource = 10;
repeated .ReplicationLoadSource replLoadSource = 10;
repeated .ReplicationLoadSource replLoadSource = 10;
repeated .ReplicationLoadSource replLoadSource = 10;
repeated .ReplicationLoadSource replLoadSource = 10;
repeated .ReplicationLoadSource replLoadSource = 10;
repeated .ReplicationLoadSource replLoadSource = 10;
repeated .ReplicationLoadSource replLoadSource = 10;
optional uint64 report_end_time = 8;
optional uint64 report_end_time = 8;
optional uint64 report_end_time = 8;
optional uint64 report_start_time = 7;
optional uint64 report_start_time = 7;
optional uint64 report_start_time = 7;
required bytes request = 4;
required bytes request = 4;
required bytes request = 4;
optional bool request_param = 4;
optional bool request_param = 4;
optional bool request_param = 4;
optional int32 requests = 4;
optional int32 requests = 4;
optional int32 requests = 3;
optional int32 requests = 3;
optional int32 requests = 3;
optional int32 requests = 4;
null if no credentials were provided.
null if no credentials were provided.
null if no user is set.
optional uint32 response = 1;
optional uint32 response = 1;
optional uint32 response = 1;
optional string restVersion = 1;
optional string restVersion = 1;
optional string restVersion = 1;
optional string restVersion = 1;
optional string restVersion = 1;
optional string restVersion = 1;
optional .Result result = 1;
optional .Result result = 1;
optional .Result result = 1;
optional .Result result = 1;
optional .Result result = 1;
optional .Result result = 1;
optional .Result result = 2;
optional .Result result = 2;
optional .Result result = 2;
repeated .RegionActionResult result = 1;
repeated .RegionActionResult result = 1;
repeated .RegionActionResult result = 1;
optional .Result result = 1;
optional .Result result = 1;
optional .Result result = 2;
repeated .RegionActionResult result = 1;
repeated .RegionActionResult result = 1;
repeated .RegionActionResult result = 1;
repeated .RegionActionResult result = 1;
repeated .RegionActionResult result = 1;
repeated .RegionActionResult result = 1;
repeated .RegionActionResult result = 1;
repeated .RegionActionResult result = 1;
optional .Result result = 1;
optional .Result result = 1;
optional .Result result = 1;
optional .Result result = 1;
optional .Result result = 1;
optional .Result result = 1;
optional .Result result = 2;
optional .Result result = 2;
optional .Result result = 2;
repeated .RegionActionResult result = 1;
repeated .RegionActionResult result = 1;
repeated .RegionActionResult result = 1;
repeated .RegionActionResult result = 1;
repeated .RegionActionResult result = 1;
repeated .RegionActionResult result = 1;
repeated .ResultOrException resultOrException = 1;
repeated .ResultOrException resultOrException = 1;
repeated .ResultOrException resultOrException = 1;
repeated .ResultOrException resultOrException = 1;
repeated .ResultOrException resultOrException = 1;
repeated .ResultOrException resultOrException = 1;
repeated .ResultOrException resultOrException = 1;
repeated .ResultOrException resultOrException = 1;
repeated .ResultOrException resultOrException = 1;
repeated .ResultOrException resultOrException = 1;
repeated .ResultOrException resultOrException = 1;
repeated .ResultOrException resultOrException = 1;
repeated .ResultOrException resultOrException = 1;
repeated .ResultOrException resultOrException = 1;
repeated .ResultOrException resultOrException = 1;
repeated .ResultOrException resultOrException = 1;
repeated .ResultOrException resultOrException = 1;
repeated .Result results = 5;
repeated .Result results = 5;
repeated .Result results = 5;
repeated .Result results = 5;
repeated .Result results = 5;
repeated .Result results = 5;
repeated .Result results = 5;
repeated .Result results = 5;
repeated .Result results = 5;
repeated .Result results = 5;
repeated .Result results = 5;
repeated .Result results = 5;
repeated .Result results = 5;
repeated .Result results = 5;
repeated .Result results = 5;
repeated .Result results = 5;
repeated .Result results = 5;
optional bool reversed = 15 [default = false];
optional bool reversed = 15 [default = false];
optional bool reversed = 15 [default = false];
required string revision = 3;
required string revision = 3;
required string revision = 3;
required string revision = 3;
required string revision = 3;
required string revision = 3;
optional uint32 root_index_size_KB = 12;
optional uint32 root_index_size_KB = 12;
optional uint32 root_index_size_KB = 12;
optional int32 rootIndexSizeKB = 9;
optional int32 rootIndexSizeKB = 9;
optional int32 rootIndexSizeKB = 9;
CellUtil.getRowByte(Cell, int)
optional bytes row = 1;
optional bytes row = 1;
optional bytes row = 1;
required bytes row = 1;
required bytes row = 1;
required bytes row = 1;
required bytes row = 1;
required bytes row = 1;
required bytes row = 1;
required bytes row = 1;
required bytes row = 1;
required bytes row = 1;
required bytes row = 1;
required bytes row = 1;
required bytes row = 1;
optional bytes row = 1;
optional bytes row = 1;
optional bytes row = 1;
optional bytes row = 1;
optional bytes row = 1;
optional bytes row = 1;
required uint32 rowBatchSize = 4;
required uint32 rowBatchSize = 4;
required uint32 rowBatchSize = 4;
rpc getRowCount(.CountRequest) returns (.CountResponse);
rpc getRowCount(.CountRequest) returns (.CountResponse);
rpc GetRowNum(.AggregateRequest) returns (.AggregateResponse);
rpc GetRowNum(.AggregateRequest) returns (.AggregateResponse);
required string row_processor_class_name = 1;
required string row_processor_class_name = 1;
required string row_processor_class_name = 1;
required string row_processor_class_name = 1;
required string row_processor_class_name = 1;
required string row_processor_class_name = 1;
optional bytes row_processor_initializer_message = 3;
optional bytes row_processor_initializer_message = 3;
optional bytes row_processor_initializer_message = 3;
optional string row_processor_initializer_message_name = 2;
optional string row_processor_initializer_message_name = 2;
optional string row_processor_initializer_message_name = 2;
optional string row_processor_initializer_message_name = 2;
optional string row_processor_initializer_message_name = 2;
optional string row_processor_initializer_message_name = 2;
required bytes row_processor_result = 1;
required bytes row_processor_result = 1;
required bytes row_processor_result = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
required uint64 rowsDeleted = 1;
required uint64 rowsDeleted = 1;
required uint64 rowsDeleted = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
RpcClient.setRpcTimeout(int) and the given
default timeout.
optional uint32 rpc_version = 2;
optional uint32 rpc_version = 2;
optional uint32 rpc_version = 2;
optional uint32 rpc_version = 2;
optional uint32 rpc_version = 2;
optional uint32 rpc_version = 2;
required .Scan scan = 1;
required .Scan scan = 1;
required .Scan scan = 1;
required .Scan scan = 2;
required .Scan scan = 2;
required .Scan scan = 2;
optional .Scan scan = 2;
optional .Scan scan = 2;
optional .Scan scan = 2;
required .Scan scan = 1;
required .Scan scan = 2;
optional .Scan scan = 2;
Scan
object.
Scan
object.
Scan.
Scan.setCaching(int) and Scan.getCaching()
optional uint64 scanner_id = 3;
optional uint64 scanner_id = 3;
optional uint64 scanner_id = 3;
optional uint64 scanner_id = 2;
optional uint64 scanner_id = 2;
optional uint64 scanner_id = 2;
required .Scan scan = 1;
required .Scan scan = 1;
required .Scan scan = 1;
required .Scan scan = 2;
required .Scan scan = 2;
required .Scan scan = 2;
optional .Scan scan = 2;
optional .Scan scan = 2;
optional .Scan scan = 2;
optional int32 scan_result = 1;
optional int32 scan_result = 1;
optional int32 scan_result = 1;
Scan objects.
rpc GetSchemaAlterStatus(.GetSchemaAlterStatusRequest) returns (.GetSchemaAlterStatusResponse);
rpc GetSchemaAlterStatus(.GetSchemaAlterStatusRequest) returns (.GetSchemaAlterStatusResponse);
repeated .FamilyScope scopes = 6;
repeated .FamilyScope scopes = 6;
repeated .FamilyScope scopes = 6;
repeated .FamilyScope scopes = 6;
repeated .FamilyScope scopes = 6;
repeated .FamilyScope scopes = 6;
repeated .FamilyScope scopes = 6;
repeated .FamilyScope scopes = 6;
repeated .FamilyScope scopes = 6;
repeated .FamilyScope scopes = 6;
repeated .FamilyScope scopes = 6;
repeated .FamilyScope scopes = 6;
repeated .FamilyScope scopes = 6;
repeated .FamilyScope scopes = 6;
repeated .FamilyScope scopes = 6;
repeated .FamilyScope scopes = 6;
repeated .FamilyScope scopes = 6;
required .ScopeType scope_type = 2;
required .ScopeType scope_type = 2;
required .ScopeType scope_type = 2;
required bytes second = 2;
required bytes second = 2;
required bytes second = 2;
optional bytes second_part = 2;
optional bytes second_part = 2;
optional bytes second_part = 2;
required uint64 sequence_id = 2;
required uint64 sequence_id = 2;
required uint64 sequence_id = 2;
optional int64 sequence_number = 6;
optional int64 sequence_number = 6;
optional int64 sequence_number = 6;
optional bytes serialized_comparator = 2;
optional bytes serialized_comparator = 2;
optional bytes serialized_comparator = 2;
optional bytes serialized_filter = 2;
optional bytes serialized_filter = 2;
optional bytes serialized_filter = 2;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required uint64 server_current_time = 3;
required uint64 server_current_time = 3;
required uint64 server_current_time = 3;
ClusterStatus.getServers()
rpc GetServerInfo(.GetServerInfoRequest) returns (.GetServerInfoResponse);
rpc GetServerInfo(.GetServerInfoRequest) returns (.GetServerInfoResponse);
required .ServerInfo server_info = 1;
required .ServerInfo server_info = 1;
required .ServerInfo server_info = 1;
required .ServerInfo server_info = 1;
required .ServerInfo server_info = 1;
required .ServerInfo server_info = 1;
required .ServerInfo server_info = 1;
required .ServerLoad server_load = 2;
required .ServerLoad server_load = 2;
required .ServerLoad server_load = 2;
required .ServerLoad server_load = 2;
required .ServerLoad server_load = 2;
required .ServerLoad server_load = 2;
required .ServerLoad server_load = 2;
ServerName from catalog table Result.
required .ServerName server_name = 1;
required .ServerName server_name = 1;
required .ServerName server_name = 1;
required .ServerName server_name = 4;
required .ServerName server_name = 4;
required .ServerName server_name = 4;
required .ServerName server_name = 2;
required .ServerName server_name = 2;
required .ServerName server_name = 2;
required .ServerName server_name = 1;
required .ServerName server_name = 4;
required .ServerName server_name = 2;
required .ServerName server_name = 1;
required .ServerName server_name = 1;
required .ServerName server_name = 1;
required .ServerName server_name = 4;
required .ServerName server_name = 4;
required .ServerName server_name = 4;
required .ServerName server_name = 2;
required .ServerName server_name = 2;
required .ServerName server_name = 2;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
optional uint64 serverStartCode = 5;
optional uint64 serverStartCode = 5;
optional uint64 serverStartCode = 5;
optional uint64 serverStartCode = 2;
optional uint64 serverStartCode = 2;
optional uint64 serverStartCode = 2;
required uint64 server_start_code = 2;
required uint64 server_start_code = 2;
required uint64 server_start_code = 2;
optional string serverVersion = 4;
optional string serverVersion = 4;
optional string serverVersion = 4;
optional string serverVersion = 4;
optional string serverVersion = 4;
optional string serverVersion = 4;
optional bytes service = 3;
optional bytes service = 3;
optional bytes service = 3;
optional string service = 4;
optional string service = 4;
optional string service = 4;
optional string service = 4;
optional string service = 4;
optional string service = 4;
optional .CoprocessorServiceCall service_call = 4;
optional .CoprocessorServiceCall service_call = 4;
optional .CoprocessorServiceCall service_call = 4;
optional .CoprocessorServiceCall service_call = 4;
optional .CoprocessorServiceCall service_call = 4;
optional .CoprocessorServiceCall service_call = 4;
optional .CoprocessorServiceCall service_call = 4;
required string service_name = 2;
required string service_name = 2;
required string service_name = 2;
optional string service_name = 2;
optional string service_name = 2;
optional string service_name = 2;
required string service_name = 2;
required string service_name = 2;
required string service_name = 2;
optional string service_name = 2;
optional string service_name = 2;
optional string service_name = 2;
optional .CoprocessorServiceResult service_result = 4;
optional .CoprocessorServiceResult service_result = 4;
optional .CoprocessorServiceResult service_result = 4;
optional .CoprocessorServiceResult service_result = 4;
optional .CoprocessorServiceResult service_result = 4;
optional .CoprocessorServiceResult service_result = 4;
optional .CoprocessorServiceResult service_result = 4;
TGets (Thrift) into a list of Gets (HBase).
required string signature = 1;
required string signature = 1;
required string signature = 1;
required string signature = 1;
required string signature = 1;
required string signature = 1;
required .SingleColumnValueFilter single_column_value_filter = 1;
required .SingleColumnValueFilter single_column_value_filter = 1;
required .SingleColumnValueFilter single_column_value_filter = 1;
required .SingleColumnValueFilter single_column_value_filter = 1;
required .SingleColumnValueFilter single_column_value_filter = 1;
required .SingleColumnValueFilter single_column_value_filter = 1;
required .SingleColumnValueFilter single_column_value_filter = 1;
ImmutableBytesWritable.getLength() instead
required uint32 sizeOfLogQueue = 3;
required uint32 sizeOfLogQueue = 3;
required uint32 sizeOfLogQueue = 3;
optional bool small = 14;
optional bool small = 14;
optional bool small = 14;
required .SnapshotDescription snapshot = 1;
required .SnapshotDescription snapshot = 1;
required .SnapshotDescription snapshot = 1;
optional .ProcedureDescription snapshot = 2;
optional .ProcedureDescription snapshot = 2;
optional .ProcedureDescription snapshot = 2;
optional .SnapshotDescription snapshot = 1;
optional .SnapshotDescription snapshot = 1;
optional .SnapshotDescription snapshot = 1;
optional .SnapshotDescription snapshot = 1;
optional .SnapshotDescription snapshot = 1;
optional .SnapshotDescription snapshot = 1;
optional .SnapshotDescription snapshot = 2;
optional .SnapshotDescription snapshot = 2;
optional .SnapshotDescription snapshot = 2;
required .SnapshotDescription snapshot = 1;
required .SnapshotDescription snapshot = 1;
required .SnapshotDescription snapshot = 1;
required .SnapshotDescription snapshot = 1;
required .SnapshotDescription snapshot = 1;
required .SnapshotDescription snapshot = 1;
required .SnapshotDescription snapshot = 1;
optional .ProcedureDescription snapshot = 2;
optional .SnapshotDescription snapshot = 1;
optional .SnapshotDescription snapshot = 1;
optional .SnapshotDescription snapshot = 2;
required .SnapshotDescription snapshot = 1;
required .SnapshotDescription snapshot = 1;
required .SnapshotDescription snapshot = 1;
required .SnapshotDescription snapshot = 1;
required .SnapshotDescription snapshot = 1;
optional .ProcedureDescription snapshot = 2;
optional .ProcedureDescription snapshot = 2;
optional .ProcedureDescription snapshot = 2;
optional .SnapshotDescription snapshot = 1;
optional .SnapshotDescription snapshot = 1;
optional .SnapshotDescription snapshot = 1;
optional .SnapshotDescription snapshot = 1;
optional .SnapshotDescription snapshot = 1;
optional .SnapshotDescription snapshot = 1;
optional .SnapshotDescription snapshot = 2;
optional .SnapshotDescription snapshot = 2;
optional .SnapshotDescription snapshot = 2;
required .SnapshotDescription snapshot = 1;
required .SnapshotDescription snapshot = 1;
required .SnapshotDescription snapshot = 1;
required .SnapshotDescription snapshot = 1;
required .SnapshotDescription snapshot = 1;
required .SnapshotDescription snapshot = 1;
repeated .SnapshotDescription snapshots = 1;
repeated .SnapshotDescription snapshots = 1;
repeated .SnapshotDescription snapshots = 1;
repeated .SnapshotDescription snapshots = 1;
repeated .SnapshotDescription snapshots = 1;
repeated .SnapshotDescription snapshots = 1;
repeated .SnapshotDescription snapshots = 1;
repeated .SnapshotDescription snapshots = 1;
repeated .SnapshotDescription snapshots = 1;
repeated .SnapshotDescription snapshots = 1;
repeated .SnapshotDescription snapshots = 1;
repeated .SnapshotDescription snapshots = 1;
repeated .SnapshotDescription snapshots = 1;
repeated .SnapshotDescription snapshots = 1;
repeated .SnapshotDescription snapshots = 1;
repeated .SnapshotDescription snapshots = 1;
repeated .SnapshotDescription snapshots = 1;
repeated bytes sorted_prefixes = 1;
repeated bytes sorted_prefixes = 1;
repeated bytes sorted_prefixes = 1;
repeated bytes sorted_prefixes = 1;
repeated bytes sorted_prefixes = 1;
repeated bytes sorted_prefixes = 1;
repeated bytes sorted_prefixes = 1;
repeated bytes sorted_prefixes = 1;
repeated bytes sorted_prefixes = 1;
optional string source = 1;
optional string source = 1;
optional string source = 1;
optional string source = 1;
optional string source = 1;
optional string source = 1;
required .RegionSpecifier spec = 1;
required .RegionSpecifier spec = 1;
required .RegionSpecifier spec = 1;
required .RegionSpecifier spec = 1;
required .RegionSpecifier spec = 1;
required .RegionSpecifier spec = 1;
required .RegionSpecifier spec = 1;
optional bool split = 6;
optional bool split = 6;
optional bool split = 6;
required bytes splitkey = 1;
required bytes splitkey = 1;
required bytes splitkey = 1;
repeated bytes split_keys = 2;
repeated bytes split_keys = 2;
repeated bytes split_keys = 2;
repeated bytes split_keys = 2;
repeated bytes split_keys = 2;
repeated bytes split_keys = 2;
repeated bytes split_keys = 2;
repeated bytes split_keys = 2;
repeated bytes split_keys = 2;
optional bytes split_point = 2;
optional bytes split_point = 2;
optional bytes split_point = 2;
required string src_checksum = 6;
required string src_checksum = 6;
required string src_checksum = 6;
required string src_checksum = 6;
required string src_checksum = 6;
required string src_checksum = 6;
optional string stack_trace = 2;
optional string stack_trace = 2;
optional string stack_trace = 2;
optional string stack_trace = 2;
optional string stack_trace = 2;
optional string stack_trace = 2;
optional uint64 stamp = 3;
optional uint64 stamp = 3;
optional uint64 stamp = 3;
optional uint64 start_code = 3;
optional uint64 start_code = 3;
optional uint64 start_code = 3;
optional int64 startCode = 2;
optional int64 startCode = 2;
optional int64 startCode = 2;
required string start_date = 1;
required string start_date = 1;
required string start_date = 1;
required string start_date = 1;
required string start_date = 1;
required string start_date = 1;
optional bytes start_key = 3;
optional bytes start_key = 3;
optional bytes start_key = 3;
optional bytes startKey = 2;
optional bytes startKey = 2;
optional bytes startKey = 2;
optional bytes start_row = 3;
optional bytes start_row = 3;
optional bytes start_row = 3;
optional bytes startRow = 1;
optional bytes startRow = 1;
optional bytes startRow = 1;
optional int64 startTime = 5;
optional int64 startTime = 5;
optional int64 startTime = 5;
required .RegionState.State state = 2;
required .RegionState.State state = 2;
required .RegionState.State state = 2;
optional .RegionState.State state = 3;
optional .RegionState.State state = 3;
optional .RegionState.State state = 3;
required .ReplicationState.State state = 1;
required .ReplicationState.State state = 1;
required .ReplicationState.State state = 1;
required .SplitLogTask.State state = 1;
required .SplitLogTask.State state = 1;
required .SplitLogTask.State state = 1;
required .Table.State state = 1 [default = ENABLED];
required .Table.State state = 1 [default = ENABLED];
required .Table.State state = 1 [default = ENABLED];
rpc GetStd(.AggregateRequest) returns (.AggregateResponse);
rpc GetStd(.AggregateRequest) returns (.AggregateResponse);
optional bytes stop_row = 4;
optional bytes stop_row = 4;
optional bytes stop_row = 4;
optional bytes stop_row_key = 1;
optional bytes stop_row_key = 1;
optional bytes stop_row_key = 1;
rpc GetStoreFile(.GetStoreFileRequest) returns (.GetStoreFileResponse);
rpc GetStoreFile(.GetStoreFileRequest) returns (.GetStoreFileResponse);
repeated string store_file = 1;
repeated string store_file = 1;
repeated string store_file = 1;
repeated string store_file = 1;
repeated string store_file = 1;
repeated string store_file = 1;
repeated string store_file = 1;
repeated string store_file = 1;
repeated string store_file = 1;
optional uint32 storefile_index_size_MB = 7;
optional uint32 storefile_index_size_MB = 7;
optional uint32 storefile_index_size_MB = 7;
optional int32 storefileIndexSizeMB = 6;
optional int32 storefileIndexSizeMB = 6;
optional int32 storefileIndexSizeMB = 6;
repeated string store_file = 1;
repeated string store_file = 1;
repeated string store_file = 1;
optional uint32 storefiles = 3;
optional uint32 storefiles = 3;
optional uint32 storefiles = 3;
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
optional int32 storefiles = 3;
optional int32 storefiles = 3;
optional int32 storefiles = 3;
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
optional uint32 storefile_size_MB = 5;
optional uint32 storefile_size_MB = 5;
optional uint32 storefile_size_MB = 5;
optional int32 storefileSizeMB = 4;
optional int32 storefileSizeMB = 4;
optional int32 storefileSizeMB = 4;
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
required string store_home_dir = 6;
required string store_home_dir = 6;
required string store_home_dir = 6;
required string store_home_dir = 6;
required string store_home_dir = 6;
required string store_home_dir = 6;
optional uint32 store_limit = 8;
optional uint32 store_limit = 8;
optional uint32 store_limit = 8;
optional uint32 store_limit = 11;
optional uint32 store_limit = 11;
optional uint32 store_limit = 11;
optional uint32 store_offset = 9;
optional uint32 store_offset = 9;
optional uint32 store_offset = 9;
optional uint32 store_offset = 12;
optional uint32 store_offset = 12;
optional uint32 store_offset = 12;
optional uint32 stores = 2;
optional uint32 stores = 2;
optional uint32 stores = 2;
optional int32 stores = 2;
optional int32 stores = 2;
optional int32 stores = 2;
repeated .StoreSequenceId store_sequence_id = 2;
repeated .StoreSequenceId store_sequence_id = 2;
repeated .StoreSequenceId store_sequence_id = 2;
repeated .StoreSequenceId store_sequence_id = 2;
repeated .StoreSequenceId store_sequence_id = 2;
repeated .StoreSequenceId store_sequence_id = 2;
repeated .StoreSequenceId store_sequence_id = 2;
repeated .StoreSequenceId store_sequence_id = 2;
repeated .StoreSequenceId store_sequence_id = 2;
repeated .StoreSequenceId store_sequence_id = 2;
repeated .StoreSequenceId store_sequence_id = 2;
repeated .StoreSequenceId store_sequence_id = 2;
repeated .StoreSequenceId store_sequence_id = 2;
repeated .StoreSequenceId store_sequence_id = 2;
repeated .StoreSequenceId store_sequence_id = 2;
repeated .StoreSequenceId store_sequence_id = 2;
repeated .StoreSequenceId store_sequence_id = 2;
optional uint32 store_uncompressed_size_MB = 4;
optional uint32 store_uncompressed_size_MB = 4;
optional uint32 store_uncompressed_size_MB = 4;
required string substr = 1;
required string substr = 1;
required string substr = 1;
required string substr = 1;
required string substr = 1;
required string substr = 1;
rpc GetSum(.AggregateRequest) returns (.AggregateResponse);
rpc GetSum(.AggregateRequest) returns (.AggregateResponse);
optional bool synchronous = 2;
optional bool synchronous = 2;
optional bool synchronous = 2;
optional string table = 2;
optional string table = 2;
optional string table = 2;
optional .TableSchema table = 3;
optional .TableSchema table = 3;
optional .TableSchema table = 3;
optional .TableSchema table = 3;
optional string table = 2;
optional string table = 2;
optional string table = 2;
table descriptor for this table.
table descriptor for this table.
rpc GetTableDescriptors(.GetTableDescriptorsRequest) returns (.GetTableDescriptorsResponse);
rpc GetTableDescriptors(.GetTableDescriptorsRequest) returns (.GetTableDescriptorsResponse);
Path object representing the table directory under
path rootdir
ClientScanner.getTable()
optional .TableName table_name = 2;
optional .TableName table_name = 2;
optional .TableName table_name = 2;
optional .TableName table_name = 1;
optional .TableName table_name = 1;
optional .TableName table_name = 1;
required .TableName table_name = 2;
required .TableName table_name = 2;
required .TableName table_name = 2;
optional .TableName table_name = 1;
optional .TableName table_name = 1;
optional .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
repeated .TableName tableName = 1;
repeated .TableName tableName = 1;
repeated .TableName tableName = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName tableName = 1;
required .TableName tableName = 1;
required .TableName tableName = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required bytes table_name = 1;
required bytes table_name = 1;
required bytes table_name = 1;
required bytes table_name = 2;
required bytes table_name = 2;
required bytes table_name = 2;
optional .TableName table_name = 1;
optional .TableName table_name = 1;
optional .TableName table_name = 1;
TableName object representing
the table directory under
path rootdir
optional .TableName table_name = 2;
optional .TableName table_name = 1;
required .TableName table_name = 2;
optional .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
repeated .TableName tableName = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName tableName = 1;
required .TableName table_name = 1;
optional .TableName table_name = 1;
repeated .TableName tableName = 1;
repeated .TableName tableName = 1;
repeated .TableName tableName = 1;
repeated .TableName tableName = 1;
repeated .TableName tableName = 1;
repeated .TableName tableName = 1;
repeated .TableName tableName = 1;
optional .TableName table_name = 2;
optional .TableName table_name = 2;
optional .TableName table_name = 2;
optional .TableName table_name = 1;
optional .TableName table_name = 1;
optional .TableName table_name = 1;
required .TableName table_name = 2;
required .TableName table_name = 2;
required .TableName table_name = 2;
optional .TableName table_name = 1;
optional .TableName table_name = 1;
optional .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
repeated .TableName tableName = 1;
repeated .TableName tableName = 1;
repeated .TableName tableName = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName tableName = 1;
required .TableName tableName = 1;
required .TableName tableName = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
optional .TableName table_name = 1;
optional .TableName table_name = 1;
optional .TableName table_name = 1;
repeated .TableName tableName = 1;
repeated .TableName tableName = 1;
repeated .TableName tableName = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
rpc GetTableNames(.GetTableNamesRequest) returns (.GetTableNamesResponse);
rpc GetTableNames(.GetTableNamesRequest) returns (.GetTableNamesResponse);
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
optional .TableSchema table = 3;
optional .TableSchema table = 3;
optional .TableSchema table = 3;
optional .TablePermission table_permission = 4;
optional .TablePermission table_permission = 4;
optional .TablePermission table_permission = 4;
optional .TablePermission table_permission = 4;
optional .TablePermission table_permission = 4;
optional .TablePermission table_permission = 4;
optional .TablePermission table_permission = 4;
required .TableSchema table_schema = 1;
required .TableSchema table_schema = 1;
required .TableSchema table_schema = 1;
repeated .TableSchema table_schema = 1;
repeated .TableSchema table_schema = 1;
repeated .TableSchema table_schema = 1;
repeated .TableSchema tableSchema = 1;
repeated .TableSchema tableSchema = 1;
repeated .TableSchema tableSchema = 1;
required .TableSchema table_schema = 2;
required .TableSchema table_schema = 2;
required .TableSchema table_schema = 2;
required .TableSchema table_schema = 1;
required .TableSchema table_schema = 1;
required .TableSchema table_schema = 1;
required .TableSchema table_schema = 1;
repeated .TableSchema table_schema = 1;
repeated .TableSchema tableSchema = 1;
required .TableSchema table_schema = 2;
required .TableSchema table_schema = 1;
repeated .TableSchema table_schema = 1;
repeated .TableSchema tableSchema = 1;
repeated .TableSchema table_schema = 1;
repeated .TableSchema table_schema = 1;
repeated .TableSchema table_schema = 1;
repeated .TableSchema tableSchema = 1;
repeated .TableSchema tableSchema = 1;
repeated .TableSchema tableSchema = 1;
repeated .TableSchema table_schema = 1;
repeated .TableSchema table_schema = 1;
repeated .TableSchema table_schema = 1;
repeated .TableSchema tableSchema = 1;
repeated .TableSchema tableSchema = 1;
repeated .TableSchema tableSchema = 1;
required .TableSchema table_schema = 1;
required .TableSchema table_schema = 1;
required .TableSchema table_schema = 1;
repeated .TableSchema table_schema = 1;
repeated .TableSchema table_schema = 1;
repeated .TableSchema table_schema = 1;
repeated .TableSchema tableSchema = 1;
repeated .TableSchema tableSchema = 1;
repeated .TableSchema tableSchema = 1;
required .TableSchema table_schema = 2;
required .TableSchema table_schema = 2;
required .TableSchema table_schema = 2;
required .TableSchema table_schema = 1;
required .TableSchema table_schema = 1;
required .TableSchema table_schema = 1;
repeated .TableSchema table_schema = 1;
repeated .TableSchema table_schema = 1;
repeated .TableSchema table_schema = 1;
repeated .TableSchema tableSchema = 1;
repeated .TableSchema tableSchema = 1;
repeated .TableSchema tableSchema = 1;
optional bytes tags = 7;
optional bytes tags = 7;
optional bytes tags = 7;
optional bytes tags = 7;
optional bytes tags = 7;
optional bytes tags = 7;
optional bytes tags = 5;
optional bytes tags = 5;
optional bytes tags = 5;
Cell.getTagsLengthUnsigned() which can handle tags length upto 65535.
optional int64 thread_id = 3;
optional int64 thread_id = 3;
optional int64 thread_id = 3;
optional .TimeRange time_range = 5;
optional .TimeRange time_range = 5;
optional .TimeRange time_range = 5;
optional .TimeRange time_range = 7;
optional .TimeRange time_range = 7;
optional .TimeRange time_range = 7;
optional .TimeRange time_range = 6;
optional .TimeRange time_range = 6;
optional .TimeRange time_range = 6;
optional .TimeRange time_range = 5;
optional .TimeRange time_range = 7;
optional .TimeRange time_range = 6;
optional .TimeRange time_range = 5;
optional .TimeRange time_range = 5;
optional .TimeRange time_range = 5;
optional .TimeRange time_range = 7;
optional .TimeRange time_range = 7;
optional .TimeRange time_range = 7;
optional .TimeRange time_range = 6;
optional .TimeRange time_range = 6;
optional .TimeRange time_range = 6;
optional uint64 timestamp = 3;
optional uint64 timestamp = 3;
optional uint64 timestamp = 3;
optional uint64 timestamp = 4;
optional uint64 timestamp = 4;
optional uint64 timestamp = 4;
optional uint64 timestamp = 4;
optional uint64 timestamp = 4;
optional uint64 timestamp = 4;
optional uint64 timestamp = 4;
optional uint64 timestamp = 3;
optional uint64 timestamp = 3;
optional uint64 timestamp = 3;
optional uint64 timestamp = 4;
optional uint64 timestamp = 4;
optional int64 timestamp = 3;
optional int64 timestamp = 3;
optional int64 timestamp = 3;
required uint64 timeStampOfLastShippedOp = 4;
required uint64 timeStampOfLastShippedOp = 4;
required uint64 timeStampOfLastShippedOp = 4;
repeated int64 timestamps = 1 [packed = true];
repeated int64 timestamps = 1 [packed = true];
repeated int64 timestamps = 1 [packed = true];
repeated int64 timestamps = 1 [packed = true];
repeated int64 timestamps = 1 [packed = true];
repeated int64 timestamps = 1 [packed = true];
repeated int64 timestamps = 1 [packed = true];
repeated int64 timestamps = 1 [packed = true];
repeated int64 timestamps = 1 [packed = true];
required uint64 timeStampsOfLastAppliedOp = 2;
required uint64 timeStampsOfLastAppliedOp = 2;
required uint64 timeStampsOfLastAppliedOp = 2;
optional uint64 to = 2;
optional uint64 to = 2;
optional uint64 to = 2;
optional .Token token = 1;
optional .Token token = 1;
optional .Token token = 1;
optional .Token token = 1;
optional .Token token = 1;
optional .Token token = 1;
optional .Token token = 1;
optional uint64 total_compacting_KVs = 10;
optional uint64 total_compacting_KVs = 10;
optional uint64 total_compacting_KVs = 10;
optional int64 totalCompactingKVs = 12;
optional int64 totalCompactingKVs = 12;
optional int64 totalCompactingKVs = 12;
optional uint32 total_number_of_requests = 2;
optional uint32 total_number_of_requests = 2;
optional uint32 total_number_of_requests = 2;
optional uint32 total_regions = 2;
optional uint32 total_regions = 2;
optional uint32 total_regions = 2;
optional uint32 total_static_bloom_size_KB = 14;
optional uint32 total_static_bloom_size_KB = 14;
optional uint32 total_static_bloom_size_KB = 14;
optional int32 totalStaticBloomSizeKB = 11;
optional int32 totalStaticBloomSizeKB = 11;
optional int32 totalStaticBloomSizeKB = 11;
optional uint32 total_static_index_size_KB = 13;
optional uint32 total_static_index_size_KB = 13;
optional uint32 total_static_index_size_KB = 13;
optional int32 totalStaticIndexSizeKB = 10;
optional int32 totalStaticIndexSizeKB = 10;
optional int32 totalStaticIndexSizeKB = 10;
optional uint64 total_uncompressed_bytes = 4;
optional uint64 total_uncompressed_bytes = 4;
optional uint64 total_uncompressed_bytes = 4;
repeated .StackTraceElementMessage trace = 4;
repeated .StackTraceElementMessage trace = 4;
repeated .StackTraceElementMessage trace = 4;
repeated .StackTraceElementMessage trace = 4;
repeated .StackTraceElementMessage trace = 4;
repeated .StackTraceElementMessage trace = 4;
repeated .StackTraceElementMessage trace = 4;
repeated .StackTraceElementMessage trace = 4;
optional int64 trace_id = 1;
optional int64 trace_id = 1;
optional int64 trace_id = 1;
optional .RPCTInfo trace_info = 2;
optional .RPCTInfo trace_info = 2;
optional .RPCTInfo trace_info = 2;
optional .RPCTInfo trace_info = 2;
optional .RPCTInfo trace_info = 2;
optional .RPCTInfo trace_info = 2;
optional .RPCTInfo trace_info = 2;
repeated .StackTraceElementMessage trace = 4;
repeated .StackTraceElementMessage trace = 4;
repeated .StackTraceElementMessage trace = 4;
repeated .StackTraceElementMessage trace = 4;
repeated .StackTraceElementMessage trace = 4;
repeated .StackTraceElementMessage trace = 4;
repeated .StackTraceElementMessage trace = 4;
repeated .StackTraceElementMessage trace = 4;
repeated .StackTraceElementMessage trace = 4;
repeated .RegionStateTransition transition = 2;
repeated .RegionStateTransition transition = 2;
repeated .RegionStateTransition transition = 2;
repeated .RegionStateTransition transition = 2;
repeated .RegionStateTransition transition = 2;
required .RegionStateTransition.TransitionCode transition_code = 1;
required .RegionStateTransition.TransitionCode transition_code = 1;
required .RegionStateTransition.TransitionCode transition_code = 1;
repeated .RegionStateTransition transition = 2;
repeated .RegionStateTransition transition = 2;
repeated .RegionStateTransition transition = 2;
optional bool transition_in_ZK = 3 [default = true];
optional bool transition_in_ZK = 3 [default = true];
optional bool transition_in_ZK = 3 [default = true];
repeated .RegionStateTransition transition = 2;
repeated .RegionStateTransition transition = 2;
repeated .RegionStateTransition transition = 2;
repeated .RegionStateTransition transition = 2;
repeated .RegionStateTransition transition = 2;
repeated .RegionStateTransition transition = 2;
repeated .RegionStateTransition transition = 2;
repeated .RegionStateTransition transition = 2;
repeated .RegionStateTransition transition = 2;
optional uint32 ttl = 4;
optional uint32 ttl = 4;
optional uint32 ttl = 4;
optional int32 ttl = 3;
optional int32 ttl = 3;
optional int32 ttl = 3;
optional .Permission.Type type = 1;
optional .Permission.Type type = 1;
optional .Permission.Type type = 1;
required .Permission.Type type = 1;
required .Permission.Type type = 1;
required .Permission.Type type = 1;
required .RegionSpecifier.RegionSpecifierType type = 1;
required .RegionSpecifier.RegionSpecifierType type = 1;
required .RegionSpecifier.RegionSpecifierType type = 1;
optional .SnapshotDescription.Type type = 4 [default = FLUSH];
optional .SnapshotDescription.Type type = 4 [default = FLUSH];
optional .SnapshotDescription.Type type = 4 [default = FLUSH];
required .SnapshotFileInfo.Type type = 1;
required .SnapshotFileInfo.Type type = 1;
required .SnapshotFileInfo.Type type = 1;
optional uint64 uncompressed_data_index_size = 3;
optional uint64 uncompressed_data_index_size = 3;
optional uint64 uncompressed_data_index_size = 3;
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
required string url = 2;
required string url = 2;
required string url = 2;
required string url = 2;
required string url = 2;
required string url = 2;
optional uint32 used_heap_MB = 3;
optional uint32 used_heap_MB = 3;
optional uint32 used_heap_MB = 3;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required string user = 4;
required string user = 4;
required string user = 4;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
repeated .UserAuthorizations userAuths = 1;
repeated .UserAuthorizations userAuths = 1;
repeated .UserAuthorizations userAuths = 1;
repeated .UserAuthorizations userAuths = 1;
repeated .UserAuthorizations userAuths = 1;
repeated .UserAuthorizations userAuths = 1;
repeated .UserAuthorizations userAuths = 1;
repeated .UserAuthorizations userAuths = 1;
repeated .UserAuthorizations userAuths = 1;
repeated .UserAuthorizations userAuths = 1;
repeated .UserAuthorizations userAuths = 1;
repeated .UserAuthorizations userAuths = 1;
repeated .UserAuthorizations userAuths = 1;
repeated .UserAuthorizations userAuths = 1;
repeated .UserAuthorizations userAuths = 1;
repeated .UserAuthorizations userAuths = 1;
repeated .UserAuthorizations userAuths = 1;
required string user = 4;
required string user = 4;
required string user = 4;
optional .UserInformation user_info = 1;
optional .UserInformation user_info = 1;
optional .UserInformation user_info = 1;
optional .UserInformation user_info = 1;
optional .UserInformation user_info = 1;
optional .UserInformation user_info = 1;
optional .UserInformation user_info = 1;
required bytes username = 2;
required bytes username = 2;
required bytes username = 2;
optional string username = 1;
optional string username = 1;
optional string username = 1;
optional string username = 1;
optional string username = 1;
optional string username = 1;
repeated .UserPermission user_permission = 1;
repeated .UserPermission user_permission = 1;
repeated .UserPermission user_permission = 1;
required .UserPermission user_permission = 1;
required .UserPermission user_permission = 1;
required .UserPermission user_permission = 1;
required .UserPermission user_permission = 1;
required .UserPermission user_permission = 1;
required .UserPermission user_permission = 1;
repeated .UserPermission user_permission = 1;
required .UserPermission user_permission = 1;
required .UserPermission user_permission = 1;
repeated .UserPermission user_permission = 1;
repeated .UserPermission user_permission = 1;
repeated .UserPermission user_permission = 1;
repeated .UserPermission user_permission = 1;
repeated .UserPermission user_permission = 1;
repeated .UserPermission user_permission = 1;
repeated .UserPermission user_permission = 1;
repeated .UserPermission user_permission = 1;
repeated .UserPermission user_permission = 1;
repeated .UserPermission user_permission = 1;
required .UserPermission user_permission = 1;
required .UserPermission user_permission = 1;
required .UserPermission user_permission = 1;
required .UserPermission user_permission = 1;
required .UserPermission user_permission = 1;
required .UserPermission user_permission = 1;
repeated .UserPermission user_permission = 1;
repeated .UserPermission user_permission = 1;
repeated .UserPermission user_permission = 1;
rpc GetUserPermissions(.GetUserPermissionsRequest) returns (.GetUserPermissionsResponse);
rpc GetUserPermissions(.GetUserPermissionsRequest) returns (.GetUserPermissionsResponse);
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
CellUtil.cloneValue(Cell)
ColumnInterpreter.getValue(byte[], byte[], KeyValue), this method should be made abstract
optional bytes value = 6;
optional bytes value = 6;
optional bytes value = 6;
optional bytes value = 6;
optional bytes value = 6;
optional bytes value = 6;
required .NameBytesPair value = 2;
required .NameBytesPair value = 2;
required .NameBytesPair value = 2;
optional .NameBytesPair value = 1;
optional .NameBytesPair value = 1;
optional .NameBytesPair value = 1;
optional bytes value = 2;
optional bytes value = 2;
optional bytes value = 2;
optional bytes value = 1;
optional bytes value = 1;
optional bytes value = 1;
optional bytes value = 2;
optional bytes value = 2;
optional bytes value = 2;
optional int64 value = 2;
optional int64 value = 2;
optional int64 value = 2;
required string value = 2;
required string value = 2;
required string value = 2;
required bytes value = 2;
required bytes value = 2;
required bytes value = 2;
required bool value = 1;
required bool value = 1;
required bool value = 1;
required string value = 2;
required string value = 2;
required string value = 2;
required string value = 2;
required string value = 2;
required string value = 2;
ByteBuffer.
ByteBuffer.
required .NameBytesPair value = 2;
optional .NameBytesPair value = 1;
required string value = 2;
required string value = 2;
required string value = 2;
required string value = 2;
required string value = 2;
required string value = 2;
required string value = 2;
required string value = 2;
required string value = 2;
required .NameBytesPair value = 2;
required .NameBytesPair value = 2;
required .NameBytesPair value = 2;
optional .NameBytesPair value = 1;
optional .NameBytesPair value = 1;
optional .NameBytesPair value = 1;
HTableDescriptor.values map.
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
required string version = 1;
required string version = 1;
required string version = 1;
optional int32 version = 5;
optional int32 version = 5;
optional int32 version = 5;
required string version = 1;
required string version = 1;
required string version = 1;
optional int32 version = 1;
optional int32 version = 1;
optional int32 version = 1;
required string version = 1;
required string version = 1;
required string version = 1;
required string version = 1;
required string version = 1;
required string version = 1;
optional .VersionInfo version_info = 5;
optional .VersionInfo version_info = 5;
optional .VersionInfo version_info = 5;
optional .VersionInfo version_info = 5;
optional .VersionInfo version_info = 5;
optional .VersionInfo version_info = 5;
optional .VersionInfo version_info = 5;
optional uint32 version_of_closing_node = 2;
optional uint32 version_of_closing_node = 2;
optional uint32 version_of_closing_node = 2;
optional uint32 version_of_offline_node = 2;
optional uint32 version_of_offline_node = 2;
optional uint32 version_of_offline_node = 2;
optional uint64 versionsDeleted = 2;
optional uint64 versionsDeleted = 2;
optional uint64 versionsDeleted = 2;
repeated .VisibilityLabel visLabel = 1;
repeated .VisibilityLabel visLabel = 1;
repeated .VisibilityLabel visLabel = 1;
repeated .VisibilityLabel visLabel = 1;
repeated .VisibilityLabel visLabel = 1;
repeated .VisibilityLabel visLabel = 1;
repeated .VisibilityLabel visLabel = 1;
repeated .VisibilityLabel visLabel = 1;
repeated .VisibilityLabel visLabel = 1;
repeated .VisibilityLabel visLabel = 1;
repeated .VisibilityLabel visLabel = 1;
repeated .VisibilityLabel visLabel = 1;
repeated .VisibilityLabel visLabel = 1;
repeated .VisibilityLabel visLabel = 1;
repeated .VisibilityLabel visLabel = 1;
repeated .VisibilityLabel visLabel = 1;
repeated .VisibilityLabel visLabel = 1;
HRegionServer.instantiateHLog(Path, String) setting up WAL instance.
optional string wal_name = 5;
optional string wal_name = 5;
optional string wal_name = 5;
optional string wal_name = 5;
optional string wal_name = 5;
optional string wal_name = 5;
optional string wal_server = 4;
optional string wal_server = 4;
optional string wal_server = 4;
optional string wal_server = 4;
optional string wal_server = 4;
optional string wal_server = 4;
path files.
optional uint32 webui_port = 2;
optional uint32 webui_port = 2;
optional uint32 webui_port = 2;
Writable.readFields(java.io.DataInput).
Writable.readFields(java.io.DataInput).
optional string writer_cls_name = 4;
optional string writer_cls_name = 4;
optional string writer_cls_name = 4;
optional string writer_cls_name = 4;
optional string writer_cls_name = 4;
optional string writer_cls_name = 4;
optional uint64 write_requests_count = 9;
optional uint64 write_requests_count = 9;
optional uint64 write_requests_count = 9;
optional int64 writeRequestsCount = 8;
optional int64 writeRequestsCount = 8;
optional int64 writeRequestsCount = 8;
HFile writers
HFile writers.
required uint64 write_time = 4;
required uint64 write_time = 4;
required uint64 write_time = 4;
Mutation.getDurability() instead.
optional uint32 yet_to_update_regions = 1;
optional uint32 yet_to_update_regions = 1;
optional uint32 yet_to_update_regions = 1;
Global = 1;
rpc Grant(.GrantRequest) returns (.GrantResponse);
rpc Grant(.GrantRequest) returns (.GrantResponse);
AccessControlClient.grant(Configuration, TableName, String, byte[], byte[], Permission.Action...) instead.
GREATER_OR_EQUAL = 4;
GREATER = 5;
HFile.Reader that serves up
either the top or bottom half of a HFile where 'bottom' is the first half
of the file containing the keys that sort lowest and 'top' is the second half
of the file with keys that sort greater than those of the bottom half.required uint64 ageOfLastAppliedOp = 1;
required uint64 ageOfLastAppliedOp = 1;
required uint64 ageOfLastAppliedOp = 1;
required uint64 ageOfLastShippedOp = 2;
required uint64 ageOfLastShippedOp = 2;
required uint64 ageOfLastShippedOp = 2;
required string algorithm = 1;
required string algorithm = 1;
required string algorithm = 1;
optional bool assign_seq_num = 3;
optional bool assign_seq_num = 3;
optional bool assign_seq_num = 3;
optional bool assign_seq_num = 2;
optional bool assign_seq_num = 2;
optional bool assign_seq_num = 2;
optional int32 associated_cell_count = 3;
optional int32 associated_cell_count = 3;
optional int32 associated_cell_count = 3;
optional int32 associated_cell_count = 8;
optional int32 associated_cell_count = 8;
optional int32 associated_cell_count = 8;
optional int32 associated_cell_count = 2;
optional int32 associated_cell_count = 2;
optional int32 associated_cell_count = 2;
optional bool atomic = 2;
optional bool atomic = 2;
optional bool atomic = 2;
optional string auth_method = 2;
optional string auth_method = 2;
optional string auth_method = 2;
optional double averageLoad = 5;
optional double averageLoad = 5;
optional double averageLoad = 5;
optional bool balancer_on = 9;
optional bool balancer_on = 9;
optional bool balancer_on = 9;
optional bool balancer_on = 1;
optional bool balancer_on = 1;
optional bool balancer_on = 1;
required bool balancer_ran = 1;
required bool balancer_ran = 1;
required bool balancer_ran = 1;
optional int32 batch = 4;
optional int32 batch = 4;
optional int32 batch = 4;
optional uint32 batch_size = 9;
optional uint32 batch_size = 9;
optional uint32 batch_size = 9;
required bytes bigdecimal_msg = 1;
required bytes bigdecimal_msg = 1;
required bytes bigdecimal_msg = 1;
required .BitComparator.BitwiseOp bitwise_op = 2;
required .BitComparator.BitwiseOp bitwise_op = 2;
required .BitComparator.BitwiseOp bitwise_op = 2;
required string bulk_token = 1;
required string bulk_token = 1;
required string bulk_token = 1;
required string bulk_token = 1;
required string bulk_token = 1;
required string bulk_token = 1;
required string bulk_token = 4;
required string bulk_token = 4;
required string bulk_token = 4;
optional bool cache_blocks = 7 [default = true];
optional bool cache_blocks = 7 [default = true];
optional bool cache_blocks = 7 [default = true];
optional bool cache_blocks = 8 [default = true];
optional bool cache_blocks = 8 [default = true];
optional bool cache_blocks = 8 [default = true];
optional bool cacheBlocks = 11;
optional bool cacheBlocks = 11;
optional bool cacheBlocks = 11;
optional uint32 caching = 17;
optional uint32 caching = 17;
optional uint32 caching = 17;
optional int32 caching = 9;
optional int32 caching = 9;
optional int32 caching = 9;
required .CoprocessorServiceCall call = 2;
required .CoprocessorServiceCall call = 2;
required .CoprocessorServiceCall call = 2;
optional uint32 call_id = 1;
optional uint32 call_id = 1;
optional uint32 call_id = 1;
optional uint32 call_id = 1;
optional uint32 call_id = 1;
optional uint32 call_id = 1;
optional string cell_block_codec_class = 3;
optional string cell_block_codec_class = 3;
optional string cell_block_codec_class = 3;
optional string cell_block_compressor_class = 4;
optional string cell_block_compressor_class = 4;
optional string cell_block_compressor_class = 4;
optional .CellBlockMeta cell_block_meta = 5;
optional .CellBlockMeta cell_block_meta = 5;
optional .CellBlockMeta cell_block_meta = 5;
optional .CellBlockMeta cell_block_meta = 3;
optional .CellBlockMeta cell_block_meta = 3;
optional .CellBlockMeta cell_block_meta = 3;
optional string cell_codec_cls_name = 5;
optional string cell_codec_cls_name = 5;
optional string cell_codec_cls_name = 5;
optional .CellType cell_type = 5;
optional .CellType cell_type = 5;
optional .CellType cell_type = 5;
required float chance = 1;
required float chance = 1;
required float chance = 1;
required string charset = 3;
required string charset = 3;
required string charset = 3;
optional string class_name = 1;
optional string class_name = 1;
optional string class_name = 1;
required bool closed = 1;
required bool closed = 1;
required bool closed = 1;
optional bool close_scanner = 5;
optional bool close_scanner = 5;
optional bool close_scanner = 5;
optional bool closest_row_before = 11 [default = false];
optional bool closest_row_before = 11 [default = false];
optional bool closest_row_before = 11 [default = false];
required string cluster_id = 1;
required string cluster_id = 1;
required string cluster_id = 1;
optional .ClusterId cluster_id = 5;
optional .ClusterId cluster_id = 5;
optional .ClusterId cluster_id = 5;
required string clusterkey = 1;
required string clusterkey = 1;
required string clusterkey = 1;
required .ClusterStatus cluster_status = 1;
required .ClusterStatus cluster_status = 1;
required .ClusterStatus cluster_status = 1;
optional bytes column = 2;
optional bytes column = 2;
optional bytes column = 2;
required .ColumnFamilySchema column_families = 2;
required .ColumnFamilySchema column_families = 2;
required .ColumnFamilySchema column_families = 2;
required .ColumnFamilySchema column_families = 2;
required .ColumnFamilySchema column_families = 2;
required .ColumnFamilySchema column_families = 2;
optional bytes column_family = 2;
optional bytes column_family = 2;
optional bytes column_family = 2;
optional bytes column_family = 1;
optional bytes column_family = 1;
optional bytes column_family = 1;
required bytes column_name = 2;
required bytes column_name = 2;
required bytes column_name = 2;
optional bytes column_offset = 3;
optional bytes column_offset = 3;
optional bytes column_offset = 3;
optional bytes column_qualifier = 3;
optional bytes column_qualifier = 3;
optional bytes column_qualifier = 3;
optional bytes column_qualifier = 2;
optional bytes column_qualifier = 2;
optional bytes column_qualifier = 2;
optional bool compaction_state = 2;
optional bool compaction_state = 2;
optional bool compaction_state = 2;
optional .GetRegionInfoResponse.CompactionState compaction_state = 2;
optional .GetRegionInfoResponse.CompactionState compaction_state = 2;
optional .GetRegionInfoResponse.CompactionState compaction_state = 2;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .Comparator comparator = 5;
required .Comparator comparator = 5;
required .Comparator comparator = 5;
optional .Comparator comparator = 2;
optional .Comparator comparator = 2;
optional .Comparator comparator = 2;
required .Comparator comparator = 4;
required .Comparator comparator = 4;
required .Comparator comparator = 4;
optional string comparator_class_name = 11;
optional string comparator_class_name = 11;
optional string comparator_class_name = 11;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareType compare_op = 1;
required .CompareType compare_op = 1;
required .CompareType compare_op = 1;
required .CompareType compare_op = 3;
required .CompareType compare_op = 3;
required .CompareType compare_op = 3;
required .CompareType compare_type = 4;
required .CompareType compare_type = 4;
required .CompareType compare_type = 4;
optional uint64 complete_sequence_id = 15;
optional uint64 complete_sequence_id = 15;
optional uint64 complete_sequence_id = 15;
optional string compression = 5;
optional string compression = 5;
optional string compression = 5;
optional uint32 compression_codec = 12;
optional uint32 compression_codec = 12;
optional uint32 compression_codec = 12;
optional .Condition condition = 3;
optional .Condition condition = 3;
optional .Condition condition = 3;
optional .Condition condition = 3;
optional .Condition condition = 3;
optional .Condition condition = 3;
required int64 count = 1 [default = 0];
required int64 count = 1 [default = 0];
required int64 count = 1 [default = 0];
required uint64 create_time = 3;
required uint64 create_time = 3;
required uint64 create_time = 3;
optional int64 create_time = 6;
optional int64 create_time = 6;
optional int64 create_time = 6;
optional int64 creation_time = 3 [default = 0];
optional int64 creation_time = 3 [default = 0];
optional int64 creation_time = 3 [default = 0];
optional int64 creation_time = 3 [default = 0];
optional int64 creation_time = 3 [default = 0];
optional int64 creation_time = 3 [default = 0];
optional uint64 current_compacted_KVs = 11;
optional uint64 current_compacted_KVs = 11;
optional uint64 current_compacted_KVs = 11;
optional int64 currentCompactedKVs = 13;
optional int64 currentCompactedKVs = 13;
optional int64 currentCompactedKVs = 13;
required bytes data = 3;
required bytes data = 3;
required bytes data = 3;
optional bytes data = 4;
optional bytes data = 4;
optional bytes data = 4;
optional uint32 data_index_count = 5;
optional uint32 data_index_count = 5;
optional uint32 data_index_count = 5;
optional float data_locality = 16;
optional float data_locality = 16;
optional float data_locality = 16;
required string date = 5;
required string date = 5;
required string date = 5;
optional string declaring_class = 1;
optional string declaring_class = 1;
optional string declaring_class = 1;
required .BulkDeleteRequest.DeleteType deleteType = 2;
required .BulkDeleteRequest.DeleteType deleteType = 2;
required .BulkDeleteRequest.DeleteType deleteType = 2;
optional .MutationProto.DeleteType delete_type = 4;
optional .MutationProto.DeleteType delete_type = 4;
optional .MutationProto.DeleteType delete_type = 4;
optional .ServerName destination_server = 4;
optional .ServerName destination_server = 4;
optional .ServerName destination_server = 4;
optional .ServerName dest_server_name = 2;
optional .ServerName dest_server_name = 2;
optional .ServerName dest_server_name = 2;
optional bool done = 1 [default = false];
optional bool done = 1 [default = false];
optional bool done = 1 [default = false];
optional bool done = 1 [default = false];
optional bool done = 1 [default = false];
optional bool done = 1 [default = false];
optional bool done = 1 [default = false];
optional bool done = 1 [default = false];
optional bool done = 1 [default = false];
optional bool do_not_retry = 5;
optional bool do_not_retry = 5;
optional bool do_not_retry = 5;
required double double_msg = 1;
required double double_msg = 1;
required double double_msg = 1;
optional bool drop_dependent_column = 4;
optional bool drop_dependent_column = 4;
optional bool drop_dependent_column = 4;
optional .MutationProto.Durability durability = 6 [default = USE_DEFAULT];
optional .MutationProto.Durability durability = 6 [default = USE_DEFAULT];
optional .MutationProto.Durability durability = 6 [default = USE_DEFAULT];
required string effective_user = 1;
required string effective_user = 1;
required string effective_user = 1;
required bool enable = 1;
required bool enable = 1;
required bool enable = 1;
required bool enabled = 1;
required bool enabled = 1;
required bool enabled = 1;
required bytes encoded_region_name = 2;
required bytes encoded_region_name = 2;
required bytes encoded_region_name = 2;
required bytes encoded_region_name = 1;
required bytes encoded_region_name = 1;
required bytes encoded_region_name = 1;
optional bytes encryption_key = 13;
optional bytes encryption_key = 13;
optional bytes encryption_key = 13;
optional bytes encryption_key = 2;
optional bytes encryption_key = 2;
optional bytes encryption_key = 2;
optional bytes end_key = 4;
optional bytes end_key = 4;
optional bytes end_key = 4;
optional bytes endKey = 3;
optional bytes endKey = 3;
optional bytes endKey = 3;
optional bytes endRow = 2;
optional bytes endRow = 2;
optional bytes endRow = 2;
optional int64 endTime = 6;
optional int64 endTime = 6;
optional int64 endTime = 6;
optional string engine = 4;
optional string engine = 4;
optional string engine = 4;
optional uint64 entry_count = 7;
optional uint64 entry_count = 7;
optional uint64 entry_count = 7;
optional bytes error_info = 3;
optional bytes error_info = 3;
optional bytes error_info = 3;
optional string error_message = 1;
optional string error_message = 1;
optional string error_message = 1;
required string error_message = 2;
required string error_message = 2;
required string error_message = 2;
required uint32 event_type_code = 1;
required uint32 event_type_code = 1;
required uint32 event_type_code = 1;
ForeignExceptionSnare.rethrowException().
optional .NameBytesPair exception = 2;
optional .NameBytesPair exception = 2;
optional .NameBytesPair exception = 2;
optional .NameBytesPair exception = 3;
optional .NameBytesPair exception = 3;
optional .NameBytesPair exception = 3;
optional .ExceptionResponse exception = 2;
optional .ExceptionResponse exception = 2;
optional .ExceptionResponse exception = 2;
optional string exception_class_name = 1;
optional string exception_class_name = 1;
optional string exception_class_name = 1;
optional bool existence_only = 10 [default = false];
optional bool existence_only = 10 [default = false];
optional bool existence_only = 10 [default = false];
optional bool exists = 3;
optional bool exists = 3;
optional bool exists = 3;
required int64 expected_timeout = 1;
required int64 expected_timeout = 1;
required int64 expected_timeout = 1;
required int64 expected_timeout = 1;
required int64 expected_timeout = 1;
required int64 expected_timeout = 1;
required int64 expiration_date = 2;
required int64 expiration_date = 2;
required int64 expiration_date = 2;
optional int64 expiration_date = 5;
optional int64 expiration_date = 5;
optional int64 expiration_date = 5;
required string expression = 1;
required string expression = 1;
required string expression = 1;
optional bytes family = 2;
optional bytes family = 2;
optional bytes family = 2;
optional bytes family = 3;
optional bytes family = 3;
optional bytes family = 3;
optional bytes family = 2;
optional bytes family = 2;
optional bytes family = 2;
required bytes family = 2;
required bytes family = 2;
required bytes family = 2;
required bytes family = 1;
required bytes family = 1;
required bytes family = 1;
required bytes family = 1;
required bytes family = 1;
required bytes family = 1;
required bytes family = 2;
required bytes family = 2;
required bytes family = 2;
required bytes family = 1;
required bytes family = 1;
required bytes family = 1;
required bytes family = 1;
required bytes family = 1;
required bytes family = 1;
required bytes family_name = 1;
required bytes family_name = 1;
required bytes family_name = 1;
required bytes family_name = 3;
required bytes family_name = 3;
required bytes family_name = 3;
required bytes family_name = 1;
required bytes family_name = 1;
required bytes family_name = 1;
optional uint64 file_info_offset = 1;
optional uint64 file_info_offset = 1;
optional uint64 file_info_offset = 1;
optional string file_name = 3;
optional string file_name = 3;
optional string file_name = 3;
optional uint64 file_size = 3;
optional uint64 file_size = 3;
optional uint64 file_size = 3;
optional .Filter filter = 4;
optional .Filter filter = 4;
optional .Filter filter = 4;
optional .Filter filter = 5;
optional .Filter filter = 5;
optional .Filter filter = 5;
required .Filter filter = 1;
required .Filter filter = 1;
required .Filter filter = 1;
required .Filter filter = 1;
required .Filter filter = 1;
required .Filter filter = 1;
required .Filter filter = 1;
required .Filter filter = 1;
required .Filter filter = 1;
optional string filter = 8;
optional string filter = 8;
optional string filter = 8;
optional bool filter_if_missing = 5;
optional bool filter_if_missing = 5;
optional bool filter_if_missing = 5;
required bytes first = 1;
required bytes first = 1;
required bytes first = 1;
optional uint64 first_data_block_offset = 9;
optional uint64 first_data_block_offset = 9;
optional uint64 first_data_block_offset = 9;
optional bool flushed = 2;
optional bool flushed = 2;
optional bool flushed = 2;
optional uint32 following_kv_count = 7;
optional uint32 following_kv_count = 7;
optional uint32 following_kv_count = 7;
optional bool force = 2 [default = false];
optional bool force = 2 [default = false];
optional bool force = 2 [default = false];
optional bool forcible = 3 [default = false];
optional bool forcible = 3 [default = false];
optional bool forcible = 3 [default = false];
optional bool forcible = 3 [default = false];
optional bool forcible = 3 [default = false];
optional bool forcible = 3 [default = false];
optional uint64 from = 1;
optional uint64 from = 1;
optional uint64 from = 1;
required .DelegationToken fs_token = 3;
required .DelegationToken fs_token = 3;
required .DelegationToken fs_token = 3;
optional .GenericExceptionMessage generic_exception = 2;
optional .GenericExceptionMessage generic_exception = 2;
optional .GenericExceptionMessage generic_exception = 2;
optional .Get get = 3;
optional .Get get = 3;
optional .Get get = 3;
required .Get get = 2;
required .Get get = 2;
required .Get get = 2;
optional .GlobalPermission global_permission = 2;
optional .GlobalPermission global_permission = 2;
optional .GlobalPermission global_permission = 2;
length, and
the provided seed value
offset to offset +
length, and the provided seed value.
optional bool has_compression = 1;
optional bool has_compression = 1;
optional bool has_compression = 1;
optional bytes hash = 5;
optional bytes hash = 5;
optional bytes hash = 5;
optional bool has_tag_compression = 3;
optional bool has_tag_compression = 3;
optional bool has_tag_compression = 3;
optional .HBaseVersionFileContent hbase_version = 1;
optional .HBaseVersionFileContent hbase_version = 1;
optional .HBaseVersionFileContent hbase_version = 1;
KeyValue.equals(Object), only uses the key portion, not the value.
optional int32 heapOccupancy = 2 [default = 0];
optional int32 heapOccupancy = 2 [default = 0];
optional int32 heapOccupancy = 2 [default = 0];
optional int32 heapSizeMB = 4;
optional int32 heapSizeMB = 4;
optional int32 heapSizeMB = 4;
optional string hfile = 3;
optional string hfile = 3;
optional string hfile = 3;
required string host_name = 1;
required string host_name = 1;
required string host_name = 1;
optional string hostname = 3;
optional string hostname = 3;
optional string hostname = 3;
Hash
required int32 id = 1;
required int32 id = 1;
required int32 id = 1;
optional int64 id = 4;
optional int64 id = 4;
optional int64 id = 4;
optional bytes identifier = 1;
optional bytes identifier = 1;
optional bytes identifier = 1;
optional bytes identifier = 1;
optional bytes identifier = 1;
optional bytes identifier = 1;
optional uint64 if_older_than_ts = 2;
optional uint64 if_older_than_ts = 2;
optional uint64 if_older_than_ts = 2;
optional uint32 index = 1;
optional uint32 index = 1;
optional uint32 index = 1;
optional uint32 index = 1;
optional uint32 index = 1;
optional uint32 index = 1;
optional int32 infoPort = 1;
optional int32 infoPort = 1;
optional int32 infoPort = 1;
optional uint32 info_server_port = 9;
optional uint32 info_server_port = 9;
optional uint32 info_server_port = 9;
optional bool inMemory = 4;
optional bool inMemory = 4;
optional bool inMemory = 4;
optional string instance = 2;
optional string instance = 2;
optional string instance = 2;
required string interpreter_class_name = 1;
required string interpreter_class_name = 1;
required string interpreter_class_name = 1;
optional bytes interpreter_specific_bytes = 3;
optional bytes interpreter_specific_bytes = 3;
optional bytes interpreter_specific_bytes = 3;
required bool is_master_running = 1;
required bool is_master_running = 1;
required bool is_master_running = 1;
optional bool isRecovering = 3;
optional bool isRecovering = 3;
optional bool isRecovering = 3;
optional bool is_shared = 4;
optional bool is_shared = 4;
optional bool is_shared = 4;
optional int64 issue_date = 4;
optional int64 issue_date = 4;
optional int64 issue_date = 4;
optional bytes iv = 4;
optional bytes iv = 4;
optional bytes iv = 4;
optional string jerseyVersion = 5;
optional string jerseyVersion = 5;
optional string jerseyVersion = 5;
optional string jvmVersion = 2;
optional string jvmVersion = 2;
optional string jvmVersion = 2;
required .WALKey key = 1;
required .WALKey key = 1;
required .WALKey key = 1;
required bytes key = 3;
required bytes key = 3;
required bytes key = 3;
required bytes key = 1;
required bytes key = 1;
required bytes key = 1;
required int32 key_id = 3;
required int32 key_id = 3;
required int32 key_id = 3;
optional .CellType key_type = 5;
optional .CellType key_type = 5;
optional .CellType key_type = 5;
required .TokenIdentifier.Kind kind = 1;
required .TokenIdentifier.Kind kind = 1;
required .TokenIdentifier.Kind kind = 1;
optional string kind = 3;
optional string kind = 3;
optional string kind = 3;
required bytes label = 1;
required bytes label = 1;
required bytes label = 1;
optional uint64 last_data_block_offset = 10;
optional uint64 last_data_block_offset = 10;
optional uint64 last_data_block_offset = 10;
required uint64 last_flushed_sequence_id = 1;
required uint64 last_flushed_sequence_id = 1;
required uint64 last_flushed_sequence_id = 1;
required uint64 last_flushed_sequence_id = 1;
required uint64 last_flushed_sequence_id = 1;
required uint64 last_flushed_sequence_id = 1;
required uint64 last_flush_time = 1;
required uint64 last_flush_time = 1;
required uint64 last_flush_time = 1;
optional bool latest_version_only = 6;
optional bool latest_version_only = 6;
optional bool latest_version_only = 6;
required uint64 least_sig_bits = 1;
required uint64 least_sig_bits = 1;
required uint64 least_sig_bits = 1;
required bool len_as_val = 1;
required bool len_as_val = 1;
required bool len_as_val = 1;
required uint32 length = 2;
required uint32 length = 2;
required uint32 length = 2;
optional uint32 length = 1;
optional uint32 length = 1;
optional uint32 length = 1;
required int32 limit = 1;
required int32 limit = 1;
required int32 limit = 1;
required int32 limit = 1;
required int32 limit = 1;
required int32 limit = 1;
optional int32 line_number = 4;
optional int32 line_number = 4;
optional int32 line_number = 4;
optional .ServerLoad load = 2;
optional .ServerLoad load = 2;
optional .ServerLoad load = 2;
optional bool load_column_families_on_demand = 13;
optional bool load_column_families_on_demand = 13;
optional bool load_column_families_on_demand = 13;
required bool loaded = 1;
required bool loaded = 1;
required bool loaded = 1;
required bool loaded = 1;
required bool loaded = 1;
required bool loaded = 1;
optional uint64 load_on_open_data_offset = 2;
optional uint64 load_on_open_data_offset = 2;
optional uint64 load_on_open_data_offset = 2;
optional .RegionLoadStats loadStats = 5;
optional .RegionLoadStats loadStats = 5;
optional .RegionLoadStats loadStats = 5;
optional string location = 5;
optional string location = 5;
optional string location = 5;
required string lock_owner = 1;
required string lock_owner = 1;
required string lock_owner = 1;
optional .ServerName lock_owner = 2;
optional .ServerName lock_owner = 2;
optional .ServerName lock_owner = 2;
required uint64 log_sequence_number = 3;
required uint64 log_sequence_number = 3;
required uint64 log_sequence_number = 3;
required int64 long_msg = 1;
required int64 long_msg = 1;
required int64 long_msg = 1;
optional bool major = 2;
optional bool major = 2;
optional bool major = 2;
optional .ServerName master = 7;
optional .ServerName master = 7;
optional .ServerName master = 7;
required .ServerName master = 1;
required .ServerName master = 1;
required .ServerName master = 1;
optional bytes max_column = 3;
optional bytes max_column = 3;
optional bytes max_column = 3;
optional bool max_column_inclusive = 4;
optional bool max_column_inclusive = 4;
optional bool max_column_inclusive = 4;
optional uint32 max_heap_MB = 4;
optional uint32 max_heap_MB = 4;
optional uint32 max_heap_MB = 4;
optional int32 maxHeapSizeMB = 5;
optional int32 maxHeapSizeMB = 5;
optional int32 maxHeapSizeMB = 5;
optional uint64 max_result_size = 10;
optional uint64 max_result_size = 10;
optional uint64 max_result_size = 10;
optional uint32 max_versions = 6 [default = 1];
optional uint32 max_versions = 6 [default = 1];
optional uint32 max_versions = 6 [default = 1];
optional uint32 max_versions = 7 [default = 1];
optional uint32 max_versions = 7 [default = 1];
optional uint32 max_versions = 7 [default = 1];
optional int32 maxVersions = 4;
optional int32 maxVersions = 4;
optional int32 maxVersions = 4;
optional int32 maxVersions = 7;
optional int32 maxVersions = 7;
optional int32 maxVersions = 7;
optional int32 memstoreLoad = 1 [default = 0];
optional int32 memstoreLoad = 1 [default = 0];
optional int32 memstoreLoad = 1 [default = 0];
optional uint32 memstore_size_MB = 6;
optional uint32 memstore_size_MB = 6;
optional uint32 memstore_size_MB = 6;
optional int32 memstoreSizeMB = 5;
optional int32 memstoreSizeMB = 5;
optional int32 memstoreSizeMB = 5;
optional string message = 2;
optional string message = 2;
optional string message = 2;
optional uint32 meta_index_count = 6;
optional uint32 meta_index_count = 6;
optional uint32 meta_index_count = 6;
required string method_name = 3;
required string method_name = 3;
required string method_name = 3;
optional string method_name = 2;
optional string method_name = 2;
optional string method_name = 2;
optional string method_name = 3;
optional string method_name = 3;
optional string method_name = 3;
optional bytes min_column = 1;
optional bytes min_column = 1;
optional bytes min_column = 1;
optional bool min_column_inclusive = 2;
optional bool min_column_inclusive = 2;
optional bool min_column_inclusive = 2;
optional .SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN];
optional .SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN];
optional .SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN];
optional bool more_results = 3;
optional bool more_results = 3;
optional bool more_results = 3;
optional bool more_results_in_region = 8;
optional bool more_results_in_region = 8;
optional bool more_results_in_region = 8;
required uint64 most_sig_bits = 2;
required uint64 most_sig_bits = 2;
required uint64 most_sig_bits = 2;
optional .MutationProto.MutationType mutate_type = 2;
optional .MutationProto.MutationType mutate_type = 2;
optional .MutationProto.MutationType mutate_type = 2;
optional .MutationProto mutation = 2;
optional .MutationProto mutation = 2;
optional .MutationProto mutation = 2;
required .MutationProto mutation = 2;
required .MutationProto mutation = 2;
required .MutationProto mutation = 2;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required bytes name = 1;
required bytes name = 1;
required bytes name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
optional string name = 1;
optional string name = 1;
optional string name = 1;
required bytes name = 1;
required bytes name = 1;
required bytes name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
optional string name = 1;
optional string name = 1;
optional string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required bytes name = 1;
required bytes name = 1;
required bytes name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
required string name = 1;
optional string name = 1;
optional string name = 1;
optional string name = 1;
required bytes namespace = 1;
required bytes namespace = 1;
required bytes namespace = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
optional bytes namespace_name = 3;
optional bytes namespace_name = 3;
optional bytes namespace_name = 3;
optional bytes namespace_name = 1;
optional bytes namespace_name = 1;
optional bytes namespace_name = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
required string namespaceName = 1;
optional .NamespacePermission namespace_permission = 3;
optional .NamespacePermission namespace_permission = 3;
optional .NamespacePermission namespace_permission = 3;
optional uint64 next_call_seq = 6;
optional uint64 next_call_seq = 6;
optional uint64 next_call_seq = 6;
optional uint64 nonce = 9;
optional uint64 nonce = 9;
optional uint64 nonce = 9;
optional uint64 nonce = 3;
optional uint64 nonce = 3;
optional uint64 nonce = 3;
optional uint64 nonce = 5;
optional uint64 nonce = 5;
optional uint64 nonce = 5;
optional uint64 nonce = 10;
optional uint64 nonce = 10;
optional uint64 nonce = 10;
optional uint64 nonceGroup = 2;
optional uint64 nonceGroup = 2;
optional uint64 nonceGroup = 2;
optional uint64 nonce_group = 4;
optional uint64 nonce_group = 4;
optional uint64 nonce_group = 4;
optional uint64 nonce_group = 2;
optional uint64 nonce_group = 2;
optional uint64 nonce_group = 2;
optional uint64 nonce_group = 4;
optional uint64 nonce_group = 4;
optional uint64 nonce_group = 4;
optional uint64 nonceGroup = 9;
optional uint64 nonceGroup = 9;
optional uint64 nonceGroup = 9;
optional uint32 number_of_requests = 1;
optional uint32 number_of_requests = 1;
optional uint32 number_of_requests = 1;
optional uint32 number_of_rows = 4;
optional uint32 number_of_rows = 4;
optional uint32 number_of_rows = 4;
optional uint32 num_data_index_levels = 8;
optional uint32 num_data_index_levels = 8;
optional uint32 num_data_index_levels = 8;
optional bool offline = 5;
optional bool offline = 5;
optional bool offline = 5;
optional int32 offset = 2;
optional int32 offset = 2;
optional int32 offset = 2;
required bool on = 1;
required bool on = 1;
required bool on = 1;
optional bool openForDistributedLogReplay = 4;
optional bool openForDistributedLogReplay = 4;
optional bool openForDistributedLogReplay = 4;
optional uint64 open_seq_num = 3;
optional uint64 open_seq_num = 3;
optional uint64 open_seq_num = 3;
required .FilterList.Operator operator = 1;
required .FilterList.Operator operator = 1;
required .FilterList.Operator operator = 1;
optional uint32 ordinal = 2;
optional uint32 ordinal = 2;
optional uint32 ordinal = 2;
optional string osVersion = 3;
optional string osVersion = 3;
optional string osVersion = 3;
required int64 page_size = 1;
required int64 page_size = 1;
required int64 page_size = 1;
optional int64 parent_id = 2;
optional int64 parent_id = 2;
optional int64 parent_id = 2;
optional bytes password = 2;
optional bytes password = 2;
optional bytes password = 2;
optional bytes password = 2;
optional bytes password = 2;
optional bytes password = 2;
required string path = 2;
required string path = 2;
required string path = 2;
required string pattern = 1;
required string pattern = 1;
required string pattern = 1;
required int32 pattern_flags = 2;
required int32 pattern_flags = 2;
required int32 pattern_flags = 2;
optional bytes payload = 5;
optional bytes payload = 5;
optional bytes payload = 5;
required string peerID = 1;
required string peerID = 1;
required string peerID = 1;
required .Permission permission = 3;
required .Permission permission = 3;
required .Permission permission = 3;
optional uint32 port = 2;
optional uint32 port = 2;
optional uint32 port = 2;
required uint32 port = 1;
required uint32 port = 1;
required uint32 port = 1;
optional int32 port = 4;
optional int32 port = 4;
optional int32 port = 4;
required int64 position = 1;
required int64 position = 1;
required int64 position = 1;
required bytes prefix = 1;
required bytes prefix = 1;
required bytes prefix = 1;
optional bytes prefix = 1;
optional bytes prefix = 1;
optional bytes prefix = 1;
optional bool preserveSplits = 2 [default = false];
optional bool preserveSplits = 2 [default = false];
optional bool preserveSplits = 2 [default = false];
optional bool prev_balance_value = 1;
optional bool prev_balance_value = 1;
optional bool prev_balance_value = 1;
optional bool prev_value = 1;
optional bool prev_value = 1;
optional bool prev_value = 1;
optional uint32 priority = 6;
optional uint32 priority = 6;
optional uint32 priority = 6;
required .ProcedureDescription procedure = 1;
required .ProcedureDescription procedure = 1;
required .ProcedureDescription procedure = 1;
optional .ProcedureDescription procedure = 1;
optional .ProcedureDescription procedure = 1;
optional .ProcedureDescription procedure = 1;
optional bool processed = 2;
optional bool processed = 2;
optional bool processed = 2;
optional bool processed = 2;
optional bool processed = 2;
optional bool processed = 2;
optional string purpose = 5;
optional string purpose = 5;
optional string purpose = 5;
optional bytes qualifier = 3;
optional bytes qualifier = 3;
optional bytes qualifier = 3;
optional bytes qualifier = 3;
optional bytes qualifier = 3;
optional bytes qualifier = 3;
required bytes qualifier = 3;
required bytes qualifier = 3;
required bytes qualifier = 3;
required bytes qualifier = 3;
required bytes qualifier = 3;
required bytes qualifier = 3;
optional bytes qualifier = 1;
optional bytes qualifier = 1;
optional bytes qualifier = 1;
required bytes qualifier = 2;
required bytes qualifier = 2;
required bytes qualifier = 2;
required .Reference.Range range = 2;
required .Reference.Range range = 2;
required .Reference.Range range = 2;
optional bool readOnly = 5;
optional bool readOnly = 5;
optional bool readOnly = 5;
optional uint64 read_requests_count = 8;
optional uint64 read_requests_count = 8;
optional uint64 read_requests_count = 8;
optional int64 readRequestsCount = 7;
optional int64 readRequestsCount = 7;
optional int64 readRequestsCount = 7;
optional string real_user = 2;
optional string real_user = 2;
optional string real_user = 2;
required string reason = 1;
required string reason = 1;
required string reason = 1;
optional .Reference reference = 2;
optional .Reference reference = 2;
optional .Reference reference = 2;
optional string regex = 1;
optional string regex = 1;
optional string regex = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionInfo region = 1;
required .RegionInfo region = 1;
required .RegionInfo region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionInfo region = 1;
required .RegionInfo region = 1;
required .RegionInfo region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
optional .RegionSpecifier region = 1;
optional .RegionSpecifier region = 1;
optional .RegionSpecifier region = 1;
optional .RegionInfo region = 4;
optional .RegionInfo region = 4;
optional .RegionInfo region = 4;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region_a = 1;
required .RegionSpecifier region_a = 1;
required .RegionSpecifier region_a = 1;
required .RegionSpecifier region_a = 1;
required .RegionSpecifier region_a = 1;
required .RegionSpecifier region_a = 1;
required .RegionSpecifier region_b = 2;
required .RegionSpecifier region_b = 2;
required .RegionSpecifier region_b = 2;
required .RegionSpecifier region_b = 2;
required .RegionSpecifier region_b = 2;
required .RegionSpecifier region_b = 2;
required uint64 region_id = 1;
required uint64 region_id = 1;
required uint64 region_id = 1;
required .RegionInfo region_info = 1;
required .RegionInfo region_info = 1;
required .RegionInfo region_info = 1;
required .RegionInfo region_info = 1;
required .RegionInfo region_info = 1;
required .RegionInfo region_info = 1;
required .RegionInfo region_info = 2;
required .RegionInfo region_info = 2;
required .RegionInfo region_info = 2;
required bytes region_name = 1;
required bytes region_name = 1;
required bytes region_name = 1;
optional bytes region_name = 7;
optional bytes region_name = 7;
optional bytes region_name = 7;
required bytes region_name = 2;
required bytes region_name = 2;
required bytes region_name = 2;
optional int32 regions = 3;
optional int32 regions = 3;
optional int32 regions = 3;
required .RegionSpecifier region_specifier = 1;
required .RegionSpecifier region_specifier = 1;
required .RegionSpecifier region_specifier = 1;
required .RegionState region_state = 2;
required .RegionState region_state = 2;
required .RegionState region_state = 2;
optional string replicationEndpointImpl = 2;
optional string replicationEndpointImpl = 2;
optional string replicationEndpointImpl = 2;
required uint64 replicationLag = 5;
required uint64 replicationLag = 5;
required uint64 replicationLag = 5;
optional .ReplicationLoadSink replLoadSink = 11;
optional .ReplicationLoadSink replLoadSink = 11;
optional .ReplicationLoadSink replLoadSink = 11;
optional uint64 report_end_time = 8;
optional uint64 report_end_time = 8;
optional uint64 report_end_time = 8;
optional uint64 report_start_time = 7;
optional uint64 report_start_time = 7;
optional uint64 report_start_time = 7;
required bytes request = 4;
required bytes request = 4;
required bytes request = 4;
optional bool request_param = 4;
optional bool request_param = 4;
optional bool request_param = 4;
optional int32 requests = 4;
optional int32 requests = 4;
optional int32 requests = 3;
optional int32 requests = 3;
optional int32 requests = 3;
optional int32 requests = 4;
optional uint32 response = 1;
optional uint32 response = 1;
optional uint32 response = 1;
optional string restVersion = 1;
optional string restVersion = 1;
optional string restVersion = 1;
optional .Result result = 1;
optional .Result result = 1;
optional .Result result = 1;
optional .Result result = 1;
optional .Result result = 1;
optional .Result result = 1;
optional .Result result = 2;
optional .Result result = 2;
optional .Result result = 2;
optional bool reversed = 15 [default = false];
optional bool reversed = 15 [default = false];
optional bool reversed = 15 [default = false];
required string revision = 3;
required string revision = 3;
required string revision = 3;
optional uint32 root_index_size_KB = 12;
optional uint32 root_index_size_KB = 12;
optional uint32 root_index_size_KB = 12;
optional int32 rootIndexSizeKB = 9;
optional int32 rootIndexSizeKB = 9;
optional int32 rootIndexSizeKB = 9;
optional bytes row = 1;
optional bytes row = 1;
optional bytes row = 1;
required bytes row = 1;
required bytes row = 1;
required bytes row = 1;
required bytes row = 1;
required bytes row = 1;
required bytes row = 1;
required bytes row = 1;
required bytes row = 1;
required bytes row = 1;
required bytes row = 1;
required bytes row = 1;
required bytes row = 1;
optional bytes row = 1;
optional bytes row = 1;
optional bytes row = 1;
optional bytes row = 1;
optional bytes row = 1;
optional bytes row = 1;
required uint32 rowBatchSize = 4;
required uint32 rowBatchSize = 4;
required uint32 rowBatchSize = 4;
required string row_processor_class_name = 1;
required string row_processor_class_name = 1;
required string row_processor_class_name = 1;
optional bytes row_processor_initializer_message = 3;
optional bytes row_processor_initializer_message = 3;
optional bytes row_processor_initializer_message = 3;
optional string row_processor_initializer_message_name = 2;
optional string row_processor_initializer_message_name = 2;
optional string row_processor_initializer_message_name = 2;
required bytes row_processor_result = 1;
required bytes row_processor_result = 1;
required bytes row_processor_result = 1;
required uint64 rowsDeleted = 1;
required uint64 rowsDeleted = 1;
required uint64 rowsDeleted = 1;
optional uint32 rpc_version = 2;
optional uint32 rpc_version = 2;
optional uint32 rpc_version = 2;
optional uint32 rpc_version = 2;
optional uint32 rpc_version = 2;
optional uint32 rpc_version = 2;
required .Scan scan = 1;
required .Scan scan = 1;
required .Scan scan = 1;
required .Scan scan = 2;
required .Scan scan = 2;
required .Scan scan = 2;
optional .Scan scan = 2;
optional .Scan scan = 2;
optional .Scan scan = 2;
optional uint64 scanner_id = 3;
optional uint64 scanner_id = 3;
optional uint64 scanner_id = 3;
optional uint64 scanner_id = 2;
optional uint64 scanner_id = 2;
optional uint64 scanner_id = 2;
optional int32 scan_result = 1;
optional int32 scan_result = 1;
optional int32 scan_result = 1;
required .ScopeType scope_type = 2;
required .ScopeType scope_type = 2;
required .ScopeType scope_type = 2;
required bytes second = 2;
required bytes second = 2;
required bytes second = 2;
optional bytes second_part = 2;
optional bytes second_part = 2;
optional bytes second_part = 2;
required uint64 sequence_id = 2;
required uint64 sequence_id = 2;
required uint64 sequence_id = 2;
optional int64 sequence_number = 6;
optional int64 sequence_number = 6;
optional int64 sequence_number = 6;
optional bytes serialized_comparator = 2;
optional bytes serialized_comparator = 2;
optional bytes serialized_comparator = 2;
optional bytes serialized_filter = 2;
optional bytes serialized_filter = 2;
optional bytes serialized_filter = 2;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required uint64 server_current_time = 3;
required uint64 server_current_time = 3;
required uint64 server_current_time = 3;
required .ServerInfo server_info = 1;
required .ServerInfo server_info = 1;
required .ServerInfo server_info = 1;
required .ServerLoad server_load = 2;
required .ServerLoad server_load = 2;
required .ServerLoad server_load = 2;
required .ServerName server_name = 1;
required .ServerName server_name = 1;
required .ServerName server_name = 1;
required .ServerName server_name = 4;
required .ServerName server_name = 4;
required .ServerName server_name = 4;
required .ServerName server_name = 2;
required .ServerName server_name = 2;
required .ServerName server_name = 2;
optional uint64 serverStartCode = 5;
optional uint64 serverStartCode = 5;
optional uint64 serverStartCode = 5;
optional uint64 serverStartCode = 2;
optional uint64 serverStartCode = 2;
optional uint64 serverStartCode = 2;
required uint64 server_start_code = 2;
required uint64 server_start_code = 2;
required uint64 server_start_code = 2;
optional string serverVersion = 4;
optional string serverVersion = 4;
optional string serverVersion = 4;
optional bytes service = 3;
optional bytes service = 3;
optional bytes service = 3;
optional string service = 4;
optional string service = 4;
optional string service = 4;
optional .CoprocessorServiceCall service_call = 4;
optional .CoprocessorServiceCall service_call = 4;
optional .CoprocessorServiceCall service_call = 4;
required string service_name = 2;
required string service_name = 2;
required string service_name = 2;
optional string service_name = 2;
optional string service_name = 2;
optional string service_name = 2;
optional .CoprocessorServiceResult service_result = 4;
optional .CoprocessorServiceResult service_result = 4;
optional .CoprocessorServiceResult service_result = 4;
required string signature = 1;
required string signature = 1;
required string signature = 1;
required .SingleColumnValueFilter single_column_value_filter = 1;
required .SingleColumnValueFilter single_column_value_filter = 1;
required .SingleColumnValueFilter single_column_value_filter = 1;
required uint32 sizeOfLogQueue = 3;
required uint32 sizeOfLogQueue = 3;
required uint32 sizeOfLogQueue = 3;
optional bool small = 14;
optional bool small = 14;
optional bool small = 14;
required .SnapshotDescription snapshot = 1;
required .SnapshotDescription snapshot = 1;
required .SnapshotDescription snapshot = 1;
optional .ProcedureDescription snapshot = 2;
optional .ProcedureDescription snapshot = 2;
optional .ProcedureDescription snapshot = 2;
optional .SnapshotDescription snapshot = 1;
optional .SnapshotDescription snapshot = 1;
optional .SnapshotDescription snapshot = 1;
optional .SnapshotDescription snapshot = 1;
optional .SnapshotDescription snapshot = 1;
optional .SnapshotDescription snapshot = 1;
optional .SnapshotDescription snapshot = 2;
optional .SnapshotDescription snapshot = 2;
optional .SnapshotDescription snapshot = 2;
required .SnapshotDescription snapshot = 1;
required .SnapshotDescription snapshot = 1;
required .SnapshotDescription snapshot = 1;
required .SnapshotDescription snapshot = 1;
required .SnapshotDescription snapshot = 1;
required .SnapshotDescription snapshot = 1;
optional string source = 1;
optional string source = 1;
optional string source = 1;
required .RegionSpecifier spec = 1;
required .RegionSpecifier spec = 1;
required .RegionSpecifier spec = 1;
optional bool split = 6;
optional bool split = 6;
optional bool split = 6;
required bytes splitkey = 1;
required bytes splitkey = 1;
required bytes splitkey = 1;
optional bytes split_point = 2;
optional bytes split_point = 2;
optional bytes split_point = 2;
required string src_checksum = 6;
required string src_checksum = 6;
required string src_checksum = 6;
optional string stack_trace = 2;
optional string stack_trace = 2;
optional string stack_trace = 2;
optional uint64 stamp = 3;
optional uint64 stamp = 3;
optional uint64 stamp = 3;
optional uint64 start_code = 3;
optional uint64 start_code = 3;
optional uint64 start_code = 3;
optional int64 startCode = 2;
optional int64 startCode = 2;
optional int64 startCode = 2;
required string start_date = 1;
required string start_date = 1;
required string start_date = 1;
optional bytes start_key = 3;
optional bytes start_key = 3;
optional bytes start_key = 3;
optional bytes startKey = 2;
optional bytes startKey = 2;
optional bytes startKey = 2;
optional bytes start_row = 3;
optional bytes start_row = 3;
optional bytes start_row = 3;
optional bytes startRow = 1;
optional bytes startRow = 1;
optional bytes startRow = 1;
optional int64 startTime = 5;
optional int64 startTime = 5;
optional int64 startTime = 5;
required .RegionState.State state = 2;
required .RegionState.State state = 2;
required .RegionState.State state = 2;
optional .RegionState.State state = 3;
optional .RegionState.State state = 3;
optional .RegionState.State state = 3;
required .ReplicationState.State state = 1;
required .ReplicationState.State state = 1;
required .ReplicationState.State state = 1;
required .SplitLogTask.State state = 1;
required .SplitLogTask.State state = 1;
required .SplitLogTask.State state = 1;
required .Table.State state = 1 [default = ENABLED];
required .Table.State state = 1 [default = ENABLED];
required .Table.State state = 1 [default = ENABLED];
optional bytes stop_row = 4;
optional bytes stop_row = 4;
optional bytes stop_row = 4;
optional bytes stop_row_key = 1;
optional bytes stop_row_key = 1;
optional bytes stop_row_key = 1;
optional uint32 storefile_index_size_MB = 7;
optional uint32 storefile_index_size_MB = 7;
optional uint32 storefile_index_size_MB = 7;
optional int32 storefileIndexSizeMB = 6;
optional int32 storefileIndexSizeMB = 6;
optional int32 storefileIndexSizeMB = 6;
optional uint32 storefiles = 3;
optional uint32 storefiles = 3;
optional uint32 storefiles = 3;
optional int32 storefiles = 3;
optional int32 storefiles = 3;
optional int32 storefiles = 3;
optional uint32 storefile_size_MB = 5;
optional uint32 storefile_size_MB = 5;
optional uint32 storefile_size_MB = 5;
optional int32 storefileSizeMB = 4;
optional int32 storefileSizeMB = 4;
optional int32 storefileSizeMB = 4;
required string store_home_dir = 6;
required string store_home_dir = 6;
required string store_home_dir = 6;
optional uint32 store_limit = 8;
optional uint32 store_limit = 8;
optional uint32 store_limit = 8;
optional uint32 store_limit = 11;
optional uint32 store_limit = 11;
optional uint32 store_limit = 11;
optional uint32 store_offset = 9;
optional uint32 store_offset = 9;
optional uint32 store_offset = 9;
optional uint32 store_offset = 12;
optional uint32 store_offset = 12;
optional uint32 store_offset = 12;
optional uint32 stores = 2;
optional uint32 stores = 2;
optional uint32 stores = 2;
optional int32 stores = 2;
optional int32 stores = 2;
optional int32 stores = 2;
optional uint32 store_uncompressed_size_MB = 4;
optional uint32 store_uncompressed_size_MB = 4;
optional uint32 store_uncompressed_size_MB = 4;
required string substr = 1;
required string substr = 1;
required string substr = 1;
optional bool synchronous = 2;
optional bool synchronous = 2;
optional bool synchronous = 2;
optional string table = 2;
optional string table = 2;
optional string table = 2;
optional .TableSchema table = 3;
optional .TableSchema table = 3;
optional .TableSchema table = 3;
optional .TableName table_name = 2;
optional .TableName table_name = 2;
optional .TableName table_name = 2;
optional .TableName table_name = 1;
optional .TableName table_name = 1;
optional .TableName table_name = 1;
required .TableName table_name = 2;
required .TableName table_name = 2;
required .TableName table_name = 2;
optional .TableName table_name = 1;
optional .TableName table_name = 1;
optional .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName tableName = 1;
required .TableName tableName = 1;
required .TableName tableName = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required bytes table_name = 1;
required bytes table_name = 1;
required bytes table_name = 1;
required bytes table_name = 2;
required bytes table_name = 2;
required bytes table_name = 2;
optional .TableName table_name = 1;
optional .TableName table_name = 1;
optional .TableName table_name = 1;
optional .TablePermission table_permission = 4;
optional .TablePermission table_permission = 4;
optional .TablePermission table_permission = 4;
required .TableSchema table_schema = 1;
required .TableSchema table_schema = 1;
required .TableSchema table_schema = 1;
required .TableSchema table_schema = 2;
required .TableSchema table_schema = 2;
required .TableSchema table_schema = 2;
required .TableSchema table_schema = 1;
required .TableSchema table_schema = 1;
required .TableSchema table_schema = 1;
optional bytes tags = 7;
optional bytes tags = 7;
optional bytes tags = 7;
optional bytes tags = 7;
optional bytes tags = 7;
optional bytes tags = 7;
optional bytes tags = 5;
optional bytes tags = 5;
optional bytes tags = 5;
optional int64 thread_id = 3;
optional int64 thread_id = 3;
optional int64 thread_id = 3;
optional .TimeRange time_range = 5;
optional .TimeRange time_range = 5;
optional .TimeRange time_range = 5;
optional .TimeRange time_range = 7;
optional .TimeRange time_range = 7;
optional .TimeRange time_range = 7;
optional .TimeRange time_range = 6;
optional .TimeRange time_range = 6;
optional .TimeRange time_range = 6;
optional uint64 timestamp = 3;
optional uint64 timestamp = 3;
optional uint64 timestamp = 3;
optional uint64 timestamp = 4;
optional uint64 timestamp = 4;
optional uint64 timestamp = 4;
optional uint64 timestamp = 4;
optional uint64 timestamp = 4;
optional uint64 timestamp = 4;
optional uint64 timestamp = 4;
optional uint64 timestamp = 3;
optional uint64 timestamp = 3;
optional uint64 timestamp = 3;
optional uint64 timestamp = 4;
optional uint64 timestamp = 4;
optional int64 timestamp = 3;
optional int64 timestamp = 3;
optional int64 timestamp = 3;
required uint64 timeStampOfLastShippedOp = 4;
required uint64 timeStampOfLastShippedOp = 4;
required uint64 timeStampOfLastShippedOp = 4;
required uint64 timeStampsOfLastAppliedOp = 2;
required uint64 timeStampsOfLastAppliedOp = 2;
required uint64 timeStampsOfLastAppliedOp = 2;
optional uint64 to = 2;
optional uint64 to = 2;
optional uint64 to = 2;
optional .Token token = 1;
optional .Token token = 1;
optional .Token token = 1;
optional uint64 total_compacting_KVs = 10;
optional uint64 total_compacting_KVs = 10;
optional uint64 total_compacting_KVs = 10;
optional int64 totalCompactingKVs = 12;
optional int64 totalCompactingKVs = 12;
optional int64 totalCompactingKVs = 12;
optional uint32 total_number_of_requests = 2;
optional uint32 total_number_of_requests = 2;
optional uint32 total_number_of_requests = 2;
optional uint32 total_regions = 2;
optional uint32 total_regions = 2;
optional uint32 total_regions = 2;
optional uint32 total_static_bloom_size_KB = 14;
optional uint32 total_static_bloom_size_KB = 14;
optional uint32 total_static_bloom_size_KB = 14;
optional int32 totalStaticBloomSizeKB = 11;
optional int32 totalStaticBloomSizeKB = 11;
optional int32 totalStaticBloomSizeKB = 11;
optional uint32 total_static_index_size_KB = 13;
optional uint32 total_static_index_size_KB = 13;
optional uint32 total_static_index_size_KB = 13;
optional int32 totalStaticIndexSizeKB = 10;
optional int32 totalStaticIndexSizeKB = 10;
optional int32 totalStaticIndexSizeKB = 10;
optional uint64 total_uncompressed_bytes = 4;
optional uint64 total_uncompressed_bytes = 4;
optional uint64 total_uncompressed_bytes = 4;
optional int64 trace_id = 1;
optional int64 trace_id = 1;
optional int64 trace_id = 1;
optional .RPCTInfo trace_info = 2;
optional .RPCTInfo trace_info = 2;
optional .RPCTInfo trace_info = 2;
required .RegionStateTransition.TransitionCode transition_code = 1;
required .RegionStateTransition.TransitionCode transition_code = 1;
required .RegionStateTransition.TransitionCode transition_code = 1;
optional bool transition_in_ZK = 3 [default = true];
optional bool transition_in_ZK = 3 [default = true];
optional bool transition_in_ZK = 3 [default = true];
optional uint32 ttl = 4;
optional uint32 ttl = 4;
optional uint32 ttl = 4;
optional int32 ttl = 3;
optional int32 ttl = 3;
optional int32 ttl = 3;
optional .Permission.Type type = 1;
optional .Permission.Type type = 1;
optional .Permission.Type type = 1;
required .Permission.Type type = 1;
required .Permission.Type type = 1;
required .Permission.Type type = 1;
required .RegionSpecifier.RegionSpecifierType type = 1;
required .RegionSpecifier.RegionSpecifierType type = 1;
required .RegionSpecifier.RegionSpecifierType type = 1;
optional .SnapshotDescription.Type type = 4 [default = FLUSH];
optional .SnapshotDescription.Type type = 4 [default = FLUSH];
optional .SnapshotDescription.Type type = 4 [default = FLUSH];
required .SnapshotFileInfo.Type type = 1;
required .SnapshotFileInfo.Type type = 1;
required .SnapshotFileInfo.Type type = 1;
optional uint64 uncompressed_data_index_size = 3;
optional uint64 uncompressed_data_index_size = 3;
optional uint64 uncompressed_data_index_size = 3;
required string url = 2;
required string url = 2;
required string url = 2;
optional uint32 used_heap_MB = 3;
optional uint32 used_heap_MB = 3;
optional uint32 used_heap_MB = 3;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required string user = 4;
required string user = 4;
required string user = 4;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
required bytes user = 1;
optional .UserInformation user_info = 1;
optional .UserInformation user_info = 1;
optional .UserInformation user_info = 1;
required bytes username = 2;
required bytes username = 2;
required bytes username = 2;
optional string username = 1;
optional string username = 1;
optional string username = 1;
required .UserPermission user_permission = 1;
required .UserPermission user_permission = 1;
required .UserPermission user_permission = 1;
required .UserPermission user_permission = 1;
required .UserPermission user_permission = 1;
required .UserPermission user_permission = 1;
optional bytes value = 6;
optional bytes value = 6;
optional bytes value = 6;
optional bytes value = 6;
optional bytes value = 6;
optional bytes value = 6;
required .NameBytesPair value = 2;
required .NameBytesPair value = 2;
required .NameBytesPair value = 2;
optional .NameBytesPair value = 1;
optional .NameBytesPair value = 1;
optional .NameBytesPair value = 1;
optional bytes value = 2;
optional bytes value = 2;
optional bytes value = 2;
optional bytes value = 1;
optional bytes value = 1;
optional bytes value = 1;
optional bytes value = 2;
optional bytes value = 2;
optional bytes value = 2;
optional int64 value = 2;
optional int64 value = 2;
optional int64 value = 2;
required string value = 2;
required string value = 2;
required string value = 2;
required bytes value = 2;
required bytes value = 2;
required bytes value = 2;
required bool value = 1;
required bool value = 1;
required bool value = 1;
required string value = 2;
required string value = 2;
required string value = 2;
required string value = 2;
required string value = 2;
required string value = 2;
required string version = 1;
required string version = 1;
required string version = 1;
optional int32 version = 5;
optional int32 version = 5;
optional int32 version = 5;
required string version = 1;
required string version = 1;
required string version = 1;
optional int32 version = 1;
optional int32 version = 1;
optional int32 version = 1;
optional .VersionInfo version_info = 5;
optional .VersionInfo version_info = 5;
optional .VersionInfo version_info = 5;
optional uint32 version_of_closing_node = 2;
optional uint32 version_of_closing_node = 2;
optional uint32 version_of_closing_node = 2;
optional uint32 version_of_offline_node = 2;
optional uint32 version_of_offline_node = 2;
optional uint32 version_of_offline_node = 2;
optional uint64 versionsDeleted = 2;
optional uint64 versionsDeleted = 2;
optional uint64 versionsDeleted = 2;
optional string wal_name = 5;
optional string wal_name = 5;
optional string wal_name = 5;
optional string wal_server = 4;
optional string wal_server = 4;
optional string wal_server = 4;
optional uint32 webui_port = 2;
optional uint32 webui_port = 2;
optional uint32 webui_port = 2;
optional string writer_cls_name = 4;
optional string writer_cls_name = 4;
optional string writer_cls_name = 4;
optional uint64 write_requests_count = 9;
optional uint64 write_requests_count = 9;
optional uint64 write_requests_count = 9;
optional int64 writeRequestsCount = 8;
optional int64 writeRequestsCount = 8;
optional int64 writeRequestsCount = 8;
required uint64 write_time = 4;
required uint64 write_time = 4;
required uint64 write_time = 4;
optional uint32 yet_to_update_regions = 1;
optional uint32 yet_to_update_regions = 1;
optional uint32 yet_to_update_regions = 1;
HBASE_AUTH_TOKEN = 0;
Configuration
instance.
ReplicationEndpoint implementation for replicating to another HBase cluster.BigDecimalMsgBigDecimalMsgBytesBytesPairBytesBytesPairColumnFamilySchemaColumnFamilySchemaCompareTypeCoprocessorCoprocessorDoubleMsgDoubleMsgEmptyMsgEmptyMsgFavoredNodesFavoredNodesLongMsgLongMsgNameBytesPairNameBytesPairNameInt64PairNameInt64PairNamespaceDescriptorNamespaceDescriptorNameStringPairNameStringPairProcedureDescriptionProcedureDescriptionRegionInfoRegionInfoRegionServerInfoRegionServerInfoRegionSpecifierRegionSpecifierRegionSpecifier.RegionSpecifierTypeServerNameServerNameSnapshotDescriptionSnapshotDescriptionSnapshotDescription.TypeTableNameTableNameTableSchemaTableSchemaTimeRangeTimeRangeUUIDUUIDBaseReplicationEndpoint for replication endpoints whose
target cluster is an HBase cluster.ByteString without copy.HColumnDescriptor.HColumnDescriptor(String) and setters
HColumnDescriptor.HColumnDescriptor(String) and setters
HColumnDescriptor.HColumnDescriptor(String) and setters
HConnection instance based on the given Configuration.HConnections.HFile.HFileHFILE = 1;
StoreFiles)
for a HRegion from the FileSystem.HFile version 1 and 2 blocks, and writing version 2 blocks.HFileBlocks.HFile block writer.HFileBlockDecodingContext.HFileBlockEncodingContext.HFileBlockIndex.BlockIndexWriter) and read
(HFileBlockIndex.BlockIndexReader) single-level and multi-level block indexes.HFileOutputFormat2 instead.HFiles.FileInfoProtoFileInfoProtoFileTrailerProtoFileTrailerProtoHFile reader for version 2.HFileScanner interface.HFile reader for version 3.HFileScanner interface.HFile writer for version 3.InputFormat for HLog files.HRegion.startRegionOperation() to provide operation context for
startRegionOperation to possibly invoke different checks before any region operations. -ROOT- or
hbase:meta , if the table is read only, the maximum size of the memstore,
when the region split should occur, coprocessors associated with it etc...hbase:meta and -ROOT-.
hbase:meta and -ROOT-.
HConnectionManager.createConnection(Configuration).HConnection.getTable(String).Put or
Delete instances)
passed to it out to the configured HBase table.ibw.
Export.InterfaceAudience.Public.INCREMENT = 1;
HTableInterface.incrementColumnValue(byte[], byte[], byte[], long, Durability)
HTable.incrementColumnValue(byte[], byte[], byte[], long, Durability)
HTableInterface.incrementColumnValue(byte[], byte[], byte[], long, Durability)
HTableInterface.incrementColumnValue(byte[], byte[], byte[], long, Durability)
TIncrement create an Increment.
target in
array.
target within array, or -1 if there is no such occurrence.
HFile.CodedInputStream from a PositionedByteRange.
threadToJoin.
UserProvider specified in the configuration and set the passed
configuration via BaseConfigurable.setConf(Configuration)
Filter to apply to all incoming keys (KeyValues) to
optionally not include in the job output
HRegionServer.setupWALAndReplication() creating WAL instance.
indices[i] represents the
randomized column index corresponding to randomized row index i,
create a new array with the corresponding inverted indices.
BucketCache. hbase:meta or -ROOT-
rpc IsBalancerEnabled(.IsBalancerEnabledRequest) returns (.IsBalancerEnabledResponse);
rpc IsBalancerEnabled(.IsBalancerEnabledRequest) returns (.IsBalancerEnabledResponse);
LoadBalancerTracker.
src uses BlobCopy
encoding, false otherwise.
src uses BlobVar
encoding, false otherwise.
rpc IsCatalogJanitorEnabled(.IsCatalogJanitorEnabledRequest) returns (.IsCatalogJanitorEnabledResponse);
rpc IsCatalogJanitorEnabled(.IsCatalogJanitorEnabledRequest) returns (.IsCatalogJanitorEnabledResponse);
ZooKeeperProtos.Table.State#DISABLED.
ZooKeeperProtos.Table.State#DISABLING
of ZooKeeperProtos.Table.State#DISABLED.
ZooKeeperProtos.Table.State#ENABLED.
src appears to be positioned an encoded value,
false otherwise.
src uses fixed-width
Float32 encoding, false otherwise.
src uses fixed-width
Float64 encoding, false otherwise.
src uses fixed-width
Int32 encoding, false otherwise.
src uses fixed-width
Int64 encoding, false otherwise.
[a-zA-Z_0-9] or '_', '.' or '-'.
rpc IsMasterRunning(.IsMasterRunningRequest) returns (.IsMasterRunningResponse);
rpc IsMasterRunning(.IsMasterRunningRequest) returns (.IsMasterRunningResponse);
hbase:meta
region.
hbase:meta table
src is null, false
otherwise.
src uses Numeric
encoding, false otherwise.
src uses Numeric
encoding and is Infinite, false otherwise.
src uses Numeric
encoding and is NaN, false otherwise.
src uses Numeric
encoding and is 0, false otherwise.
byte[]'s
which preserve the natural sort order of the unencoded value.
rpc IsProcedureDone(.IsProcedureDoneRequest) returns (.IsProcedureDoneResponse);
rpc IsProcedureDone(.IsProcedureDoneRequest) returns (.IsProcedureDoneResponse);
rpc IsRestoreSnapshotDone(.IsRestoreSnapshotDoneRequest) returns (.IsRestoreSnapshotDoneResponse);
rpc IsRestoreSnapshotDone(.IsRestoreSnapshotDoneRequest) returns (.IsRestoreSnapshotDoneResponse);
-ROOT- region.
hbase.security.authentication is set to
kerberos.
rpc IsSnapshotDone(.IsSnapshotDoneRequest) returns (.IsSnapshotDoneResponse);
rpc IsSnapshotDone(.IsSnapshotDoneRequest) returns (.IsSnapshotDoneResponse);
HBaseAdmin.isTableEnabled(byte[])
HBaseAdmin.isTableEnabled(byte[])
HBaseAdmin.isTableEnabled(byte[])
HBaseAdmin.isTableEnabled(byte[])
HBaseAdmin.isTableEnabled(byte[])
HBaseAdmin.isTableEnabled(org.apache.hadoop.hbase.TableName tableName)
src uses Text encoding,
false otherwise.
HFile that
does not do checksum verification in the file system
Iterator over the values encoded in src.
UnsupportedOperationException
JenkinsHash.
length.
length.
KeyValue.KVComparator for hbase:meta catalog table
KeyValues.Set of KeyValues implemented on top of a
ConcurrentSkipListMap.buff.
LESS_OR_EQUAL = 1;
LESS = 0;
Result.listCells()
rpc listLabels(.ListLabelsRequest) returns (.ListLabelsResponse);
rpc listLabels(.ListLabelsRequest) returns (.ListLabelsResponse);
rpc ListNamespaceDescriptors(.ListNamespaceDescriptorsRequest) returns (.ListNamespaceDescriptorsResponse);
rpc ListNamespaceDescriptors(.ListNamespaceDescriptorsRequest) returns (.ListNamespaceDescriptorsResponse);
ReplicationAdmin.listPeerConfigs()
rpc ListTableDescriptorsByNamespace(.ListTableDescriptorsByNamespaceRequest) returns (.ListTableDescriptorsByNamespaceResponse);
rpc ListTableDescriptorsByNamespace(.ListTableDescriptorsByNamespaceRequest) returns (.ListTableDescriptorsByNamespaceResponse);
rpc ListTableNamesByNamespace(.ListTableNamesByNamespaceRequest) returns (.ListTableNamesByNamespaceResponse);
rpc ListTableNamesByNamespace(.ListTableNamesByNamespaceRequest) returns (.ListTableNamesByNamespaceResponse);
LoadBalancerStateLoadBalancerStateResults in the cache.
HRegionInfo from the serialized version on-disk.
ByteBuffer.
ByteBuffer.
LOG_REPLAY = 2;
LOG_SPLITTING = 1;
BaseHFileCleanerDelegate that only cleans HFiles that don't belong to a table that is
currently being archived.HeapSize,
memory-bound using an LRU eviction algorithm, and concurrent: backed by a
ConcurrentHashMap and with a non-blocking eviction thread giving
constant-time LruBlockCache.cacheBlock(org.apache.hadoop.hbase.io.hfile.BlockCacheKey, org.apache.hadoop.hbase.io.hfile.Cacheable, boolean) and LruBlockCache.getBlock(org.apache.hadoop.hbase.io.hfile.BlockCacheKey, boolean, boolean, boolean) operations.LruBlockCache.MAJOR_AND_MINOR = 3;
MAJOR = 2;
ScanMetricsScanMetricsTableSnapshotRegionSplitTableSnapshotRegionSplitzookeeper reference.
Services
against the active master.HMaster process.AddColumnRequestAddColumnRequestAddColumnResponseAddColumnResponseAssignRegionRequestAssignRegionRequestAssignRegionResponseAssignRegionResponseBalanceRequestBalanceRequestBalanceResponseBalanceResponseCreateNamespaceRequestCreateNamespaceRequestCreateNamespaceResponseCreateNamespaceResponseCreateTableRequestCreateTableRequestCreateTableResponseCreateTableResponseDeleteColumnRequestDeleteColumnRequestDeleteColumnResponseDeleteColumnResponseDeleteNamespaceRequestDeleteNamespaceRequestDeleteNamespaceResponseDeleteNamespaceResponseDeleteSnapshotRequestDeleteSnapshotRequestDeleteSnapshotResponseDeleteSnapshotResponseDeleteTableRequestDeleteTableRequestDeleteTableResponseDeleteTableResponseDisableTableRequestDisableTableRequestDisableTableResponseDisableTableResponseDispatchMergingRegionsRequestDispatchMergingRegionsRequestDispatchMergingRegionsResponseDispatchMergingRegionsResponseEnableCatalogJanitorRequestEnableCatalogJanitorRequestEnableCatalogJanitorResponseEnableCatalogJanitorResponseEnableTableRequestEnableTableRequestEnableTableResponseEnableTableResponseExecProcedureRequestExecProcedureRequestExecProcedureResponseExecProcedureResponseGetClusterStatusRequestGetClusterStatusRequestGetClusterStatusResponseGetClusterStatusResponseGetCompletedSnapshotsRequestGetCompletedSnapshotsRequestGetCompletedSnapshotsResponseGetCompletedSnapshotsResponseGetNamespaceDescriptorRequestGetNamespaceDescriptorRequestGetNamespaceDescriptorResponseGetNamespaceDescriptorResponseGetSchemaAlterStatusRequestGetSchemaAlterStatusRequestGetSchemaAlterStatusResponseGetSchemaAlterStatusResponseGetTableDescriptorsRequestGetTableDescriptorsRequestGetTableDescriptorsResponseGetTableDescriptorsResponseGetTableNamesRequestGetTableNamesRequestGetTableNamesResponseGetTableNamesResponseIsBalancerEnabledRequestIsBalancerEnabledRequestIsBalancerEnabledResponseIsBalancerEnabledResponseIsCatalogJanitorEnabledRequestIsCatalogJanitorEnabledRequestIsCatalogJanitorEnab
ledResponseIsCatalogJanitorEnabledResponseIsMasterRunningRequestIsMasterRunningRequestIsMasterRunningResponseIsMasterRunningResponseIsProcedureDoneRequestIsProcedureDoneRequestIsProcedureDoneResponseIsProcedureDoneResponseIsRestoreSnapshotDoneRequestIsRestoreSnapshotDoneRequestIsRestoreSnapshotDoneResponseIsRestoreSnapshotDoneResponseIsSnapshotDoneRequestIsSnapshotDoneRequestIsSnapshotDoneResponseIsSnapshotDoneResponseListNamespaceDescriptorsRequestListNamespaceDescriptorsRequestListNamespaceDescriptorsResponseListNamespaceDescriptorsResponseListTableDescriptorsByNamespaceRequestListTableDescriptorsByNamespaceRequestListTableDescriptorsByNamespaceResponseListTableDescriptorsByNamespaceResponseListTableNamesByNamespaceRequestListTableNamesByNamespaceRequestListTableNamesByNamespaceResponseListTableNamesByNamespaceResponseMasterServiceModifyColumnRequestModifyColumnRequestModifyColumnResponseModifyColumnResponseModifyNamespaceRequestModifyNamespaceRequestModifyNamespaceResponseModifyNamespaceResponseModifyTableRequestModifyTableRequestModifyTableResponseModifyTableResponseMoveRegionRequestMoveRegionRequestMoveRegionResponseMoveRegionResponseOfflineRegionRequestOfflineRegionRequestOfflineRegionResponseOfflineRegionResponseRestoreSnapshotRequestRestoreSnapshotRequestRestoreSnapshotResponseRestoreSnapshotResponseRunCatalogScanRequestRunCatalogScanRequestRunCatalogScanResponseRunCatalogScanResponseSetBalancerRunningRequestSetBalancerRunningRequestSetBalancerRunningResponseSetBalancerRunningResponseShutdownRequestShutdownRequestShutdownResponseShutdownResponseSnapshotRequestSnapshotRequestSnapshotResponseSnapshotResponseStopMasterRequestStopMasterRequestStopMasterResponseStopMasterResponseTruncateTableRequestTruncateTableRequestTruncateTableResponseTruncateTableResponseUnassignRegionRequestUnassignRegionRequestUnassignRegionResponseUnassignRegionResponsetrue if this permission matches the given column
family at least.
TablePermission matching up
to the column family portion of a permission.
long.
MAXIMUM = 255;
MemStoreLAB.Chunk instances.MERGE_PONR = 6;
MERGE_REVERTED = 10;
required .CoprocessorServiceCall call = 2;
optional .CellBlockMeta cell_block_meta = 5;
optional .CellBlockMeta cell_block_meta = 3;
optional .ClusterId cluster_id = 5;
required .ClusterStatus cluster_status = 1;
required .ColumnFamilySchema column_families = 2;
required .ColumnFamilySchema column_families = 2;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .ByteArrayComparable comparable = 1;
required .Comparator comparator = 5;
optional .Comparator comparator = 2;
required .Comparator comparator = 4;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
required .CompareFilter compare_filter = 1;
optional .Condition condition = 3;
optional .Condition condition = 3;
MERGED = 12;
MERGED = 8;
optional .ServerName destination_server = 4;
optional .ServerName dest_server_name = 2;
optional .NameBytesPair exception = 2;
optional .NameBytesPair exception = 3;
optional .ExceptionResponse exception = 2;
optional .Filter filter = 4;
optional .Filter filter = 5;
required .Filter filter = 1;
required .Filter filter = 1;
required .Filter filter = 1;
required .DelegationToken fs_token = 3;
optional .GenericExceptionMessage generic_exception = 2;
optional .Get get = 3;
required .Get get = 2;
optional .GlobalPermission global_permission = 2;
optional .HBaseVersionFileContent hbase_version = 1;
required .WALKey key = 1;
optional .ServerLoad load = 2;
optional .RegionLoadStats loadStats = 5;
optional .ServerName lock_owner = 2;
optional .ServerName master = 7;
required .ServerName master = 1;
optional .MutationProto mutation = 2;
required .MutationProto mutation = 2;
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
required .NamespaceDescriptor namespaceDescriptor = 1;
optional .NamespacePermission namespace_permission = 3;
required .Permission permission = 3;
required .ProcedureDescription procedure = 1;
optional .ProcedureDescription procedure = 1;
optional .Reference reference = 2;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionInfo region = 1;
required .RegionSpecifier region = 1;
required .RegionInfo region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
optional .RegionSpecifier region = 1;
optional .RegionInfo region = 4;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region = 1;
required .RegionSpecifier region_a = 1;
required .RegionSpecifier region_a = 1;
required .RegionSpecifier region_b = 2;
required .RegionSpecifier region_b = 2;
required .RegionInfo region_info = 1;
required .RegionInfo region_info = 1;
required .RegionInfo region_info = 2;
rpc MergeRegions(.MergeRegionsRequest) returns (.MergeRegionsResponse);
rpc MergeRegions(.MergeRegionsRequest) returns (.MergeRegionsResponse);
required .RegionSpecifier region_specifier = 1;
required .RegionState region_state = 2;
optional .ReplicationLoadSink replLoadSink = 11;
optional .Result result = 1;
optional .Result result = 1;
optional .Result result = 2;
required .Scan scan = 1;
required .Scan scan = 2;
optional .Scan scan = 2;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerName server = 1;
required .ServerInfo server_info = 1;
required .ServerLoad server_load = 2;
required .ServerName server_name = 1;
required .ServerName server_name = 4;
required .ServerName server_name = 2;
optional .CoprocessorServiceCall service_call = 4;
optional .CoprocessorServiceResult service_result = 4;
required .SingleColumnValueFilter single_column_value_filter = 1;
required .SnapshotDescription snapshot = 1;
optional .ProcedureDescription snapshot = 2;
optional .SnapshotDescription snapshot = 1;
optional .SnapshotDescription snapshot = 1;
optional .SnapshotDescription snapshot = 2;
required .SnapshotDescription snapshot = 1;
required .SnapshotDescription snapshot = 1;
required .RegionSpecifier spec = 1;
optional .TableSchema table = 3;
optional .TableName table_name = 2;
optional .TableName table_name = 1;
required .TableName table_name = 2;
optional .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName table_name = 1;
required .TableName tableName = 1;
required .TableName table_name = 1;
optional .TableName table_name = 1;
optional .TablePermission table_permission = 4;
required .TableSchema table_schema = 1;
required .TableSchema table_schema = 2;
required .TableSchema table_schema = 1;
optional .TimeRange time_range = 5;
optional .TimeRange time_range = 7;
optional .TimeRange time_range = 6;
optional .Token token = 1;
optional .RPCTInfo trace_info = 2;
optional .UserInformation user_info = 1;
required .UserPermission user_permission = 1;
required .UserPermission user_permission = 1;
required .NameBytesPair value = 2;
optional .NameBytesPair value = 1;
optional .VersionInfo version_info = 5;
MERGING_NEW = 14;
MERGING = 11;
KeyValue.KVComparator for hbase:meta catalog table
KeyValues.
Writables representing meta block data.
hbase:meta.hbase:meta.hbase:meta table scanning logic.hbase:metaMetricMutableQuantiles for a metric that rolls itself over on the
specified time interval.
MetricSampleQuantiles object.ScheduledExecutorService used by MetricMutableQuantilesMetricMutableQuantilesMetricsThriftServerSource
Implements BaseSource through BaseSourceImpl, following the patternHFile's mid-key.
MINIMUM = 0;
MINOR = 1;
rpc ModifyColumn(.ModifyColumnRequest) returns (.ModifyColumnResponse);
rpc ModifyColumn(.ModifyColumnRequest) returns (.ModifyColumnResponse);
rpc ModifyNamespace(.ModifyNamespaceRequest) returns (.ModifyNamespaceResponse);
rpc ModifyNamespace(.ModifyNamespaceRequest) returns (.ModifyNamespaceResponse);
rpc ModifyTable(.ModifyTableRequest) returns (.ModifyTableResponse);
rpc ModifyTable(.ModifyTableRequest) returns (.ModifyTableResponse);
r to dest.
rpc MoveRegion(.MoveRegionRequest) returns (.MoveRegionResponse);
rpc MoveRegion(.MoveRegionRequest) returns (.MoveRegionResponse);
rpc Multi(.MultiRequest) returns (.MultiResponse);
rpc Multi(.MultiRequest) returns (.MultiResponse);
HRegion.mutateRowsWithLocks(java.util.Collection, java.util.Collection)
and Coprocessor endpoints.MultiRowMutationProcessorRequestMultiRowMutationProcessorRequestMultiRowMutationProcessorResponseMultiRowMutationProcessorResponseMultiRowMutationServiceMutateRowsRequestMutateRowsRequestMutateRowsResponseMutateRowsResponseMultiTableInputFormats.MurmurHash.
MurmurHash3.
MUST_PASS_ALL = 1;
MUST_PASS_ONE = 2;
rpc Mutate(.MutateRequest) returns (.MutateResponse);
rpc Mutate(.MutateRequest) returns (.MutateResponse);
mutations against hbase:meta table.
rpc MutateRows(.MutateRowsRequest) returns (.MutateRowsResponse);
rpc MutateRows(.MutateRowsRequest) returns (.MutateRowsResponse);
Namespace = 2;
ThreadFactory that just builds daemon threads.
Canary.Monitor.
HFileBlock.BlockIterator.nextBlock() but checks block type, throws an
exception if incorrect, and returns the HFile block
NO_OP = 6;
NONE = 0;
NOT_EQUAL = 3;
Bytes.compareTo(byte[], byte[]).RegionObserver, MasterObserver, or WALObserver)
method.TokenUtil.obtainAndCacheToken(HConnection,User)
TokenUtil.obtainAuthTokenForJob(HConnection,User,Job)
instead.
TokenUtil.obtainAuthTokenForJob(HConnection,JobConf,User)
instead.
TokenUtil.obtainToken(HConnection)
TokenUtil.obtainTokenForJob(HConnection,User,Job)
TokenUtil.obtainTokenForJob(HConnection,JobConf,User)
OFFLINE = 0;
rpc OfflineRegion(.OfflineRegionRequest) returns (.OfflineRegionResponse);
rpc OfflineRegion(.OfflineRegionRequest) returns (.OfflineRegionResponse);
OPEN = 3;
OPENED = 0;
OPENED = 0;
OPENING = 2;
rpc OpenRegion(.OpenRegionRequest) returns (.OpenRegionResponse);
rpc OpenRegion(.OpenRegionRequest) returns (.OpenRegionResponse);
OR = 2;
byte[].byte[] of variable-length.OrderedBlob for use by Struct fields that
do not terminate the fields list.OrderedBytes encoding
implementations.float of 32-bits using a fixed-length encoding.double of 64-bits using a fixed-length encoding.short of 16-bits using a fixed-length encoding.int of 32-bits using a fixed-length encoding.long of 64-bits using a fixed-length encoding.byte of 8-bits using a fixed-length encoding.Number of arbitrary precision and variable-length encoding.String of variable-length.ResultScanner.next().src/main/protobuf definition files.DataOutput, just take plain OutputStream
Named oswrite so does not clash with KeyValue.write(KeyValue, DataOutput)
CodedOutputStream from a PositionedByteRange.
OWNED = 1;
T.family:qualifier form into separate byte arrays.
ParseFilterIOException.
ServerName from bytes
gotten from a call to ServerName.getVersionedBytes().
DataType API.DataType implementations backed by protobuf.PENDING_CLOSE = 4;
PENDING_OPEN = 1;
PoolMap maps a key to a collection of values, the elements
of which are managed by a pool.ReusablePool represents a PoolMap.Pool that builds
on the LinkedList class.ByteRange with additional methods to support tracking a
consumers position within the viewport.HRegion.closeRegionOperation(Operation).
RegionObserver.postCompact(ObserverContext, Store, StoreFile, CompactionRequest)
instead
StoreFiles to compact have been selected from the available
candidates.
RegionObserver.postCompactSelection(ObserverContext, Store, ImmutableList,
CompactionRequest) instead.
StoreFiles to be compacted have been selected from the available
candidates.
HMaster deletes a
table.
RegionObserver.preFlush(ObserverContext, Store, InternalScanner) instead.
HRegion.startRegionOperation().
HMaster truncates a
table.
WALEdit
replayed for this region.
WALEdit
is writen to WAL.
StoreFiles selected for compaction into a new
StoreFile.
RegionObserver.preCompact(ObserverContext, Store, InternalScanner,
ScanType, CompactionRequest) instead
StoreFiles selected for compaction into a new
StoreFile and prior to creating the scanner used to read the input files.
RegionObserver.preCompactScannerOpen(ObserverContext, Store, List, ScanType, long,
InternalScanner, CompactionRequest) instead.
RegionObserver.preCompactScannerOpen(ObserverContext, Store, List, ScanType, long, InternalScanner, CompactionRequest)
StoreFiles to compact from the list of
available candidates.
RegionObserver.preCompactSelection(ObserverContext, Store, List, CompactionRequest)
instead
StoreFiles for compaction from the list of currently
available candidates.
HMaster.
HMaster.
HMaster.
HMaster deletes a
namespace
It can't bypass the default action, e.g., ctx.bypass() won't have effect.
HMaster deletes a
table.
HMaster deletes a
table.
str
RegionObserver.preFlush(ObserverContext, Store, InternalScanner) instead
RegionObserver.preFlushScannerOpen(ObserverContext,
Store, KeyValueScanner, InternalScanner)
Callable.call() invocation.
rpc PrepareBulkLoad(.PrepareBulkLoadRequest) returns (.PrepareBulkLoadResponse);
rpc PrepareBulkLoad(.PrepareBulkLoadRequest) returns (.PrepareBulkLoadResponse);
ProtobufUtil.PB_MAGIC, to flag what
follows as a protobuf in hbase.
HMaster process.
ObserverContext.bypass() has no
effect in this hook.
HMaster process.
RegionObserver.preStoreScannerOpen(ObserverContext,
Store, Scan, NavigableSet, KeyValueScanner)
HMaster truncates a
table.
HMaster truncates a
table.
WALEdit
replayed for this region.
WALEdit
is writen to WAL.
Subprocedure on a member.rpc Process(.ProcessRequest) returns (.ProcessResponse);
rpc Process(.ProcessRequest) returns (.ProcessResponse);
HTableInterface.batch(java.util.List extends org.apache.hadoop.hbase.client.Row>, java.lang.Object[]) instead
Row implementations.
HTableInterface.batchCallback(java.util.List extends org.apache.hadoop.hbase.client.Row>, java.lang.Object[], org.apache.hadoop.hbase.client.coprocessor.Batch.Callback) instead
HTable.processBatchCallback(java.util.List extends org.apache.hadoop.hbase.client.Row>, java.lang.Object[], org.apache.hadoop.hbase.client.coprocessor.Batch.Callback) instead
val at index.
val at index.
length bytes from val into this range, starting at
index.
val at the next position in this range.
val in this range, starting at the next position.
length bytes from val into this range.
PUT = 4;
PUT = 2;
Put (HBase) from a TPut (Thrift)
TPuts (Thrift) into a list of Puts (HBase).
ps to the hbase:meta table.
Result.rawCells()
DataType for interacting with values encoded using
Bytes.putByte(byte[], int, byte).DataType for interacting with variable-length values
encoded using Bytes.putBytes(byte[], int, byte[], int, int).DataType that encodes fixed-length values encoded using
Bytes.putBytes(byte[], int, byte[], int, int).RawBytesFixedLength using the specified order
and length.
RawBytesFixedLength of the specified length.
DataType that encodes variable-length values encoded using
Bytes.putBytes(byte[], int, byte[], int, int).RawBytesTerminated using the specified terminator and
order.
RawBytesTerminated using the specified terminator and
order.
RawBytesTerminated using the specified terminator.
RawBytesTerminated using the specified terminator.
DataType for interacting with values encoded using
Bytes.putDouble(byte[], int, double).DataType for interacting with values encoded using
Bytes.putFloat(byte[], int, float).DataType for interacting with values encoded using
Bytes.putInt(byte[], int, int).DataType for interacting with values encoded using
Bytes.putLong(byte[], int, long).DataType for interacting with values encoded using
Bytes.putShort(byte[], int, short).DataType for interacting with values encoded using
Bytes.toBytes(String).DataType that encodes fixed-length values encoded using
Bytes.toBytes(String).RawStringFixedLength using the specified
order and length.
RawStringFixedLength of the specified length.
DataType that encodes variable-length values encoded using
Bytes.toBytes(String).RawStringTerminated using the specified terminator and
order.
RawStringTerminated using the specified terminator and
order.
RawStringTerminated using the specified terminator.
RawStringTerminated using the specified terminator.
off
Send a ping if timeout on read.
Base64.Base64InputStream.read() repeatedly until the end of stream is reached or
len bytes are read.
READ = 0;
BlockType.MAGIC_LENGTH from the given
stream and expects it to match this block type.
BlockType.MAGIC_LENGTH from the given
byte buffer and expects it to match this block type.
HColumnDescriptor.parseFrom(byte[]) instead.
HTableDescriptor.parseFrom(byte[]) instead.
HFile until the next
data block is found.
Writable instances
from the input stream.
HBaseProtos.SnapshotDescription stored for the snapshot in the passed directory
WritableUtils.readVLong(DataInput) but reads from a
ByteBuffer.
#readAsVLong() instead.
READY_TO_MERGE = 4;
READY_TO_SPLIT = 3;
OutputFormat.
CompareFilter implementations, such
as RowFilter, QualifierFilter, and ValueFilter, for
filtering based on the value of a given column.REGION_NAME = 1;
HRegion.Services
against a given table region.Callable.call().Services against a given region server.rpc RegionServerReport(.RegionServerReportRequest) returns (.RegionServerReportResponse);
rpc RegionServerReport(.RegionServerReportRequest) returns (.RegionServerReportResponse);
HRegionServerHRegionServer.rpc RegionServerStartup(.RegionServerStartupRequest) returns (.RegionServerStartupResponse);
rpc RegionServerStartup(.RegionServerStartupRequest) returns (.RegionServerStartupResponse);
GetLastFlushedSequenceIdRequestGetLastFlushedSequenceIdRequestGetLastFlushedSequenceIdResponseGetLastFlushedSequenceIdResponseRegionServerReportRequestRegionServerReportRequestRegionServerReportResponseRegionServerReportResponseRegionServerStartupRequestRegionServerStartupRequestRegionServerStartupResponseRegionServerStartupResponseRegionServerStatusServiceRegionStateTransitionRegionStateTransitionRegionStateTransition.TransitionCodeReportRegionStateTransitionRequestReportRegionStateTransitionRequestReportRegionStateTransitionResponseReportRegionStateTransitionResponseReportRSFatalErrorRequestReportRSFatalErrorRequestReportRSFatalErrorResponseReportRSFatalErrorResponseRegionSplitter class provides several utilities to help in the
administration lifecycle for developers who choose to manually split regions
instead of having HBase handle that automatically.RegionSplitter.SplitAlgorithm for choosing region
boundaries.EventType.
Service subclass as a master coprocessor endpoint.
Service subclass as a coprocessor endpoint to
be available for handling
HRegion.execService(com.google.protobuf.RpcController,
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall)} calls.
Service subclass as a coprocessor endpoint to
be available for handling
RemoteException with some extra information.Constraints that have been added to the table
and turn off the constraint processing.
HTableDescriptor.values map
HTableDescriptor.values map
HTableDescriptor.values map
repeated .Action action = 3;
repeated .NameBytesPair attribute = 3;
repeated .NameBytesPair attribute = 5;
repeated .NameBytesPair attribute = 2;
repeated .BytesBytesPair attributes = 2;
repeated .BytesBytesPair attributes = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
repeated .ServerName backup_masters = 8;
repeated .Cell cell = 1;
repeated .UUID cluster_ids = 8;
repeated .Column column = 2;
repeated .Column column = 1;
repeated .ColumnFamilySchema column_families = 3;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
repeated .MutationProto.ColumnValue column_value = 3;
HColumnDescriptor.configuration map.
HTableDescriptor.configuration map
NamespaceDescriptor.configuration map
repeated .NameStringPair configuration = 3;
repeated .NameStringPair configuration = 2;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .NameStringPair configuration = 4;
repeated .Coprocessor coprocessors = 6;
repeated .BytesBytesPair data = 3;
repeated .ServerName dead_servers = 3;
repeated .WALEntry entry = 1;
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
repeated .ServerName favored_node = 1;
repeated .ServerName favored_nodes = 3;
repeated .ServerName favored_nodes = 2;
repeated .Filter filters = 2;
repeated .BytesBytesPair fuzzy_keys_data = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
repeated .LiveServerInfo live_servers = 2;
repeated .NameStringPair map_entries = 1;
repeated .BytesBytesPair map_entry = 1;
repeated .Coprocessor master_coprocessors = 6;
repeated .NameInt64Pair metrics = 1;
repeated .MutationProto mutation_request = 1;
repeated .NamespaceDescriptor namespaceDescriptor = 1;
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
repeated .Permission permission = 1;
repeated .Permission permissions = 2;
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
repeated .RegionAction regionAction = 1;
repeated .RegionActionResult regionActionResult = 1;
repeated .RegionInfo region_info = 1;
repeated .RegionInfo region_info = 2;
repeated .RegionLoad region_loads = 5;
repeated .SnapshotRegionManifest region_manifests = 2;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
repeated .RegionInTransition regions_in_transition = 4;
repeated .ReplicationLoadSource replLoadSource = 10;
repeated .RegionActionResult result = 1;
repeated .ResultOrException resultOrException = 1;
repeated .Result results = 5;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
repeated .FamilyScope scopes = 6;
repeated .SnapshotDescription snapshots = 1;
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
repeated .StoreSequenceId store_sequence_id = 2;
repeated .TableName tableName = 1;
repeated .TableName table_names = 1;
repeated .TableName table_names = 1;
repeated .TableSchema table_schema = 1;
repeated .TableSchema tableSchema = 1;
repeated .StackTraceElementMessage trace = 4;
repeated .RegionStateTransition transition = 2;
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
repeated .UserAuthorizations userAuths = 1;
repeated .UserPermission user_permission = 1;
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
repeated .VisibilityLabel visLabel = 1;
rpc Replay(.ReplicateWALEntryRequest) returns (.ReplicateWALEntryResponse);
rpc Replay(.ReplicateWALEntryRequest) returns (.ReplicateWALEntryResponse);
rpc ReplicateWALEntry(.ReplicateWALEntryRequest) returns (.ReplicateWALEntryResponse);
rpc ReplicateWALEntry(.ReplicateWALEntryRequest) returns (.ReplicateWALEntryResponse);
REPLICATION_SCOPE_GLOBAL = 1;
REPLICATION_SCOPE_LOCAL = 0;
ReplicationEndpoint.replicate(ReplicateContext) method.SinkPeer as being bad (i.e.
rpc ReportRegionStateTransition(.ReportRegionStateTransitionRequest) returns (.ReportRegionStateTransitionResponse);
rpc ReportRegionStateTransition(.ReportRegionStateTransitionRequest) returns (.ReportRegionStateTransitionResponse);
rpc ReportRSFatalError(.ReportRSFatalErrorRequest) returns (.ReportRSFatalErrorResponse);
rpc ReportRSFatalError(.ReportRSFatalErrorRequest) returns (.ReportRSFatalErrorResponse);
SinkPeer successfully replicated a chunk of data.
KeyValueScanner.seek(org.apache.hadoop.hbase.KeyValue) (or KeyValueScanner.reseek(org.apache.hadoop.hbase.KeyValue) if forward is true) but only
does a seek operation after checking that it is really necessary for the
row/column combination specified by the kv parameter.
KeyValueScanner.seek(org.apache.hadoop.hbase.KeyValue) (or KeyValueScanner.reseek(org.apache.hadoop.hbase.KeyValue) if forward is true) but only
does a seek operation after checking that it is really necessary for the
row/column combination specified by the kv parameter.
KeyValueHeap.seek(KeyValue) function except
that scanner.seek(seekKey) is changed to scanner.reseek(seekKey).
key.
DefaultEnvironmentEdge.
RESIGNED = 2;
rpc RestoreSnapshot(.RestoreSnapshotRequest) returns (.RestoreSnapshotResponse);
rpc RestoreSnapshot(.RestoreSnapshotRequest) returns (.RestoreSnapshotResponse);
Get or Scan query.Result.rawCells().
Result.create(List) instead.
Result.create(List) instead.
TResult (Thrift) from a Result (HBase).
Results (HBase) into a list of TResults (Thrift).
Result with some statistics about the server/region statusForeignExceptionSnare.
SnapshotSentinel.getExceptionIfFailed().
RetriesExhaustedException
is thrown when we have more information about which rows were causing which
exceptions on what servers.Scan's start row maybe changed.
rpc Revoke(.RevokeRequest) returns (.RevokeResponse);
rpc Revoke(.RevokeRequest) returns (.RevokeResponse);
AccessControlClient.revoke(Configuration, TableName, String, byte[], byte[], Permission.Action...) instead
rpc RollWALWriter(.RollWALWriterRequest) returns (.RollWALWriterResponse);
rpc RollWALWriter(.RollWALWriterRequest) returns (.RollWALWriterResponse);
ROW = 0;
RowMutations (HBase) from a TRowMutations (Thrift)
ProcessRequestProcessRequestProcessResponseProcessResponseRowProcessorServiceclusterId with the default SocketFactory
clusterId with the default SocketFactory
PayloadCarryingRpcControllerCellBlockMetaCellBlockMetaConnectionHeaderConnectionHeaderExceptionResponseExceptionResponseRequestHeaderRequestHeaderResponseHeaderResponseHeaderUserInformationUserInformationVersionInfoVersionInfoRpcRetryingCallerRpcScheduler for
a region server.BlockingService and its associated class of
protobuf service interface.null in the case of
an error.
stdout.
rpc RunCatalogScan(.RunCatalogScanRequest) returns (.RunCatalogScanResponse);
rpc RunCatalogScan(.RunCatalogScanRequest) returns (.RunCatalogScanResponse);
rpc Scan(.ScanRequest) returns (.ScanResponse);
rpc Scan(.ScanRequest) returns (.ScanResponse);
ScannerCallable.ScannerCallable(HConnection, TableName, Scan,
ScanMetrics, PayloadCarryingRpcController)
org.apache.hadoop.hbase.rest.protobuf.generated.Scannerorg.apache.hadoop.hbase.rest.protobuf.generated.ScannerScanQueryMatcher.match(org.apache.hadoop.hbase.KeyValue) return codes.rpc SecureBulkLoadHFiles(.SecureBulkLoadHFilesRequest) returns (.SecureBulkLoadHFilesResponse);
rpc SecureBulkLoadHFiles(.SecureBulkLoadHFilesRequest) returns (.SecureBulkLoadHFilesResponse);
CleanupBulkLoadRequestCleanupBulkLoadRequestCleanupBulkLoadResponseCleanupBulkLoadResponseDelegationTokenDelegationTokenPrepareBulkLoadRequestPrepareBulkLoadRequestPrepareBulkLoadResponsePrepareBulkLoadResponseSecureBulkLoadHFilesRequestSecureBulkLoadHFilesRequestSecureBulkLoadHFilesResponseSecureBulkLoadHFilesResponseSecureBulkLoadServicek[0] ..
- seekBefore(byte[], int, int) -
Method in interface org.apache.hadoop.hbase.io.hfile.HFileScanner
-
- seekForwardTo(Cell) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.decode.PrefixTreeArraySearcher
-
- seekForwardTo(Cell) -
Method in interface org.apache.hadoop.hbase.codec.prefixtree.scanner.CellSearcher
- Note: Added for backwards compatibility with
KeyValueScanner.reseek(org.apache.hadoop.hbase.KeyValue)
Look for the key, but only look after the current position.
- seekForwardToOrAfter(Cell) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.decode.PrefixTreeArraySearcher
-
- seekForwardToOrAfter(Cell) -
Method in interface org.apache.hadoop.hbase.codec.prefixtree.scanner.CellSearcher
- Same as seekForwardTo(..), but go to the extra effort of finding the next key if there's no
exact match.
- seekForwardToOrBefore(Cell) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.decode.PrefixTreeArraySearcher
-
- seekForwardToOrBefore(Cell) -
Method in interface org.apache.hadoop.hbase.codec.prefixtree.scanner.CellSearcher
- Same as seekForwardTo(..), but go to the extra effort of finding the next key if there's no
exact match.
- seekOnFs(long) -
Method in class org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader
-
- seekOnFs(long) -
Method in class org.apache.hadoop.hbase.regionserver.wal.ReaderBase
- Performs a filesystem-level seek to a certain position in an underlying file.
- seekOnFs(long) -
Method in class org.apache.hadoop.hbase.regionserver.wal.SequenceFileLogReader
-
- seekScanners(List<? extends KeyValueScanner>, KeyValue, boolean, boolean) -
Method in class org.apache.hadoop.hbase.regionserver.StoreScanner
- Seek the specified scanners with the given key
- seekTo(byte[]) -
Method in class org.apache.hadoop.hbase.io.hfile.AbstractHFileReader.Scanner
-
- seekTo(byte[], int, int, boolean) -
Method in class org.apache.hadoop.hbase.io.hfile.HFileReaderV2.AbstractScannerV2
- An internal API function.
- seekTo(byte[], int, int) -
Method in class org.apache.hadoop.hbase.io.hfile.HFileReaderV2.AbstractScannerV2
-
- seekTo() -
Method in class org.apache.hadoop.hbase.io.hfile.HFileReaderV2.EncodedScannerV2
-
- seekTo() -
Method in class org.apache.hadoop.hbase.io.hfile.HFileReaderV2.ScannerV2
- Positions this scanner at the start of the file.
- seekTo(byte[]) -
Method in interface org.apache.hadoop.hbase.io.hfile.HFileScanner
- SeekTo or just before the passed
key.
- seekTo(byte[], int, int) -
Method in interface org.apache.hadoop.hbase.io.hfile.HFileScanner
-
- seekTo() -
Method in interface org.apache.hadoop.hbase.io.hfile.HFileScanner
- Positions this scanner at the start of the file.
- seekToDataBlock(byte[], int, int, HFileBlock, boolean, boolean, boolean) -
Method in class org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader
- Return the data block which contains this key.
- seekToKeyInBlock(byte[], int, int, boolean) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeSeeker
- Seek forward only (should be called reseekToKeyInBlock?).
- seekToKeyInBlock(byte[], int, int, boolean) -
Method in interface org.apache.hadoop.hbase.io.encoding.DataBlockEncoder.EncodedSeeker
- Moves the seeker position within the current block to:
the last key that that is less than or equal to the given key if
seekBefore is false
the last key that is strictly less than the given key if
seekBefore is true.
- seekToLastRow() -
Method in interface org.apache.hadoop.hbase.regionserver.KeyValueScanner
- Seek the scanner at the first KeyValue of last row
- seekToLastRow() -
Method in class org.apache.hadoop.hbase.regionserver.MemStore.MemStoreScanner
-
- seekToLastRow() -
Method in class org.apache.hadoop.hbase.regionserver.NonReversedNonLazyKeyValueScanner
-
- seekToLastRow() -
Method in class org.apache.hadoop.hbase.regionserver.ReversedKeyValueHeap
-
- seekToLastRow() -
Method in class org.apache.hadoop.hbase.regionserver.StoreFileScanner
-
- seekToNextRow(KeyValue) -
Method in class org.apache.hadoop.hbase.regionserver.StoreScanner
-
- seekToOrBeforeUsingPositionAtOrAfter(byte[], int, int, boolean) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeSeeker
-
- seekToOrBeforeUsingPositionAtOrBefore(byte[], int, int, boolean) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeSeeker
-
- seekToPreviousRow(KeyValue) -
Method in interface org.apache.hadoop.hbase.regionserver.KeyValueScanner
- Seek the scanner at the first KeyValue of the row which is the previous row
of specified key
- seekToPreviousRow(KeyValue) -
Method in class org.apache.hadoop.hbase.regionserver.MemStore.MemStoreScanner
- Separately get the KeyValue before the specified key from kvset and
snapshotset, and use the row of higher one as the previous row of
specified key, then seek to the first KeyValue of previous row
- seekToPreviousRow(KeyValue) -
Method in class org.apache.hadoop.hbase.regionserver.NonReversedNonLazyKeyValueScanner
-
- seekToPreviousRow(KeyValue) -
Method in class org.apache.hadoop.hbase.regionserver.ReversedKeyValueHeap
-
- seekToPreviousRow(KeyValue) -
Method in class org.apache.hadoop.hbase.regionserver.StoreFileScanner
-
- select(List<StoreFile>, boolean, boolean, boolean) -
Method in class org.apache.hadoop.hbase.regionserver.compactions.CompactionContext
- Called to select files for compaction.
- selectCompaction(Collection<StoreFile>, List<StoreFile>, boolean, boolean, boolean) -
Method in class org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy
-
- selectCompaction(StripeCompactionPolicy.StripeInformationProvider, List<StoreFile>, boolean) -
Method in class org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy
-
- selectFlush(StripeCompactionPolicy.StripeInformationProvider, int) -
Method in class org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy
-
- SELECTOR_THREADS_CONF_KEY -
Static variable in class org.apache.hadoop.hbase.thrift.HThreadedSelectorServerArgs
- Number of selector threads for reading and writing socket
- selectScannersFrom(List<? extends KeyValueScanner>) -
Method in class org.apache.hadoop.hbase.regionserver.StoreScanner
- Filters the given list of scanners using Bloom filter, time range, and
TTL.
- selectSingleStripeCompaction(StripeCompactionPolicy.StripeInformationProvider, boolean, boolean, boolean) -
Method in class org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy
-
- selectToken(Text, Collection<Token<? extends TokenIdentifier>>) -
Method in class org.apache.hadoop.hbase.security.token.AuthenticationTokenSelector
-
- send_append(ByteBuffer, TAppend) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.Client
-
- send_atomicIncrement(ByteBuffer, ByteBuffer, ByteBuffer, long) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_checkAndDelete(ByteBuffer, ByteBuffer, ByteBuffer, ByteBuffer, ByteBuffer, TDelete) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.Client
-
- send_checkAndPut(ByteBuffer, ByteBuffer, ByteBuffer, ByteBuffer, ByteBuffer, TPut) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.Client
-
- send_closeScanner(int) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.Client
-
- send_compact(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_createTable(ByteBuffer, List<ColumnDescriptor>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_deleteAll(ByteBuffer, ByteBuffer, ByteBuffer, Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_deleteAllRow(ByteBuffer, ByteBuffer, Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_deleteAllRowTs(ByteBuffer, ByteBuffer, long, Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_deleteAllTs(ByteBuffer, ByteBuffer, ByteBuffer, long, Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_deleteMultiple(ByteBuffer, List<TDelete>) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.Client
-
- send_deleteSingle(ByteBuffer, TDelete) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.Client
-
- send_deleteTable(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_disableTable(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_enableTable(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_exists(ByteBuffer, TGet) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.Client
-
- send_get(ByteBuffer, ByteBuffer, ByteBuffer, Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_get(ByteBuffer, TGet) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.Client
-
- send_getColumnDescriptors(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_getMultiple(ByteBuffer, List<TGet>) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.Client
-
- send_getRegionInfo(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_getRow(ByteBuffer, ByteBuffer, Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_getRowOrBefore(ByteBuffer, ByteBuffer, ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_getRows(ByteBuffer, List<ByteBuffer>, Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_getRowsTs(ByteBuffer, List<ByteBuffer>, long, Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_getRowsWithColumns(ByteBuffer, List<ByteBuffer>, List<ByteBuffer>, Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_getRowsWithColumnsTs(ByteBuffer, List<ByteBuffer>, List<ByteBuffer>, long, Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_getRowTs(ByteBuffer, ByteBuffer, long, Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_getRowWithColumns(ByteBuffer, ByteBuffer, List<ByteBuffer>, Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_getRowWithColumnsTs(ByteBuffer, ByteBuffer, List<ByteBuffer>, long, Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_getScannerResults(ByteBuffer, TScan, int) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.Client
-
- send_getScannerRows(int, int) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.Client
-
- send_getTableNames() -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_getTableRegions(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_getVer(ByteBuffer, ByteBuffer, ByteBuffer, int, Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_getVerTs(ByteBuffer, ByteBuffer, ByteBuffer, long, int, Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_increment(TIncrement) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_increment(ByteBuffer, TIncrement) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.Client
-
- send_incrementRows(List<TIncrement>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_isTableEnabled(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_majorCompact(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_mutateRow(ByteBuffer, ByteBuffer, List<Mutation>, Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_mutateRow(ByteBuffer, TRowMutations) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.Client
-
- send_mutateRows(ByteBuffer, List<BatchMutation>, Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_mutateRowsTs(ByteBuffer, List<BatchMutation>, long, Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_mutateRowTs(ByteBuffer, ByteBuffer, List<Mutation>, long, Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_openScanner(ByteBuffer, TScan) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.Client
-
- send_put(ByteBuffer, TPut) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.Client
-
- send_putMultiple(ByteBuffer, List<TPut>) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.Client
-
- send_scannerClose(int) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_scannerGet(int) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_scannerGetList(int, int) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_scannerOpen(ByteBuffer, ByteBuffer, List<ByteBuffer>, Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_scannerOpenTs(ByteBuffer, ByteBuffer, List<ByteBuffer>, long, Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_scannerOpenWithPrefix(ByteBuffer, ByteBuffer, List<ByteBuffer>, Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_scannerOpenWithScan(ByteBuffer, TScan, Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_scannerOpenWithStop(ByteBuffer, ByteBuffer, ByteBuffer, List<ByteBuffer>, Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- send_scannerOpenWithStopTs(ByteBuffer, ByteBuffer, ByteBuffer, List<ByteBuffer>, long, Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.Client
-
- sendAbortToMembers(Procedure, ForeignException) -
Method in interface org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs
- Notify the members that the coordinator has aborted the procedure and that it should release
barrier resources.
- sendAbortToMembers(Procedure, ForeignException) -
Method in class org.apache.hadoop.hbase.procedure.ZKProcedureCoordinatorRpcs
- This is the abort message being sent by the coordinator to member
TODO this code isn't actually used but can be used to issue a cancellation from the
coordinator.
- sendError(int, String) -
Method in class org.apache.hadoop.hbase.rest.filter.GZIPResponseWrapper
-
- sendError(int) -
Method in class org.apache.hadoop.hbase.rest.filter.GZIPResponseWrapper
-
- sendGlobalBarrierAcquire(Procedure, byte[], List<String>) -
Method in interface org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs
- Notify the members to acquire barrier for the procedure
- sendGlobalBarrierAcquire(Procedure, byte[], List<String>) -
Method in class org.apache.hadoop.hbase.procedure.ZKProcedureCoordinatorRpcs
- The "acquire" phase.
- sendGlobalBarrierComplete() -
Method in class org.apache.hadoop.hbase.procedure.Procedure
- Sends a message to members that all
Subprocedure.insideBarrier() calls have completed.
- sendGlobalBarrierReached() -
Method in class org.apache.hadoop.hbase.procedure.Procedure
- Sends a message to all members that the global barrier condition has been satisfied.
- sendGlobalBarrierReached(Procedure, List<String>) -
Method in interface org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs
- Notify members that all members have acquired their parts of the barrier and that they can
now execute under the global barrier.
- sendGlobalBarrierReached(Procedure, List<String>) -
Method in class org.apache.hadoop.hbase.procedure.ZKProcedureCoordinatorRpcs
-
- sendGlobalBarrierStart() -
Method in class org.apache.hadoop.hbase.procedure.Procedure
- Sends a message to Members to create a new
Subprocedure for this Procedure and execute
the Subprocedure.acquireBarrier() step.
- sendMemberAborted(Subprocedure, ForeignException) -
Method in interface org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs
- Notify the coordinator that we aborted the specified
Subprocedure
- sendMemberAborted(Subprocedure, ForeignException) -
Method in class org.apache.hadoop.hbase.procedure.ZKProcedureMemberRpcs
- This should be called by the member and should write a serialized root cause exception as
to the abort znode.
- sendMemberAcquired(Subprocedure) -
Method in interface org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs
- Notify the coordinator that the specified
Subprocedure has acquired the locally required
barrier condition.
- sendMemberAcquired(Subprocedure) -
Method in class org.apache.hadoop.hbase.procedure.ZKProcedureMemberRpcs
- This attempts to create an acquired state znode for the procedure (snapshot name).
- sendMemberCompleted(Subprocedure) -
Method in interface org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs
- Notify the coordinator that the specified
Subprocedure has completed the work that
needed to be done under the global barrier.
- sendMemberCompleted(Subprocedure) -
Method in class org.apache.hadoop.hbase.procedure.ZKProcedureMemberRpcs
- This acts as the ack for a completed snapshot
- sendPing() -
Method in class org.apache.hadoop.hbase.ipc.RpcClient.Connection
-
- sendRedirect(String) -
Method in class org.apache.hadoop.hbase.rest.filter.GZIPResponseWrapper
-
- sendRegionClose(ServerName, HRegionInfo, int, ServerName, boolean) -
Method in class org.apache.hadoop.hbase.master.ServerManager
- Sends a CLOSE RPC to the specified server to close the specified region.
- sendRegionClose(ServerName, HRegionInfo, int) -
Method in class org.apache.hadoop.hbase.master.ServerManager
-
- sendRegionOpen(ServerName, HRegionInfo, int, List<ServerName>) -
Method in class org.apache.hadoop.hbase.master.ServerManager
- Sends an OPEN RPC to the specified server to open the specified region.
- sendRegionOpen(ServerName, List<Triple<HRegionInfo, Integer, List<ServerName>>>) -
Method in class org.apache.hadoop.hbase.master.ServerManager
- Sends OPEN RPCs to the specified server to open the specified regions.
- sendRegionsMerge(ServerName, HRegionInfo, HRegionInfo, boolean) -
Method in class org.apache.hadoop.hbase.master.ServerManager
- Sends a MERGE REGIONS RPC to the specified server to merge the specified
regions.
- SENT_BYTES_DESC -
Static variable in interface org.apache.hadoop.hbase.ipc.MetricsHBaseServerSource
-
- SENT_BYTES_NAME -
Static variable in interface org.apache.hadoop.hbase.ipc.MetricsHBaseServerSource
-
- sentBytes(long) -
Method in interface org.apache.hadoop.hbase.ipc.MetricsHBaseServerSource
-
- sentBytes(long) -
Method in class org.apache.hadoop.hbase.ipc.MetricsHBaseServerSourceImpl
-
- separator() -
Method in class org.apache.hadoop.hbase.util.RegionSplitter.HexStringSplit
-
- separator() -
Method in interface org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm
-
- separator() -
Method in class org.apache.hadoop.hbase.util.RegionSplitter.UniformSplit
-
- SEPARATOR_CONF_KEY -
Static variable in class org.apache.hadoop.hbase.mapreduce.ImportTsv
-
- SEQ_ID -
Static variable in class org.apache.hadoop.hbase.regionserver.StoreFile.Comparators
- Comparator that compares based on the Sequence Ids of the StoreFiles.
- seqids -
Static variable in class org.apache.hadoop.hbase.executor.EventHandler
-
- SEQNUM_QUALIFIER -
Static variable in class org.apache.hadoop.hbase.HConstants
- The open seqnum column qualifier
- SEQUENCE_ID_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId
-
- SEQUENCE_NUMBER_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.TokenIdentifier
-
- SequenceFileLogReader - Class in org.apache.hadoop.hbase.regionserver.wal
-
- SequenceFileLogReader() -
Constructor for class org.apache.hadoop.hbase.regionserver.wal.SequenceFileLogReader
-
- sequenceID -
Variable in class org.apache.hadoop.hbase.regionserver.StoreFile.Reader
-
- sequenceIdChecker -
Variable in class org.apache.hadoop.hbase.regionserver.wal.HLogSplitter
-
- sequenceNumber -
Variable in class org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier
-
- serialize(String, Throwable) -
Static method in exception org.apache.hadoop.hbase.errorhandling.ForeignException
- Converts a ForeignException to an array of bytes.
- serialize(ByteBuffer) -
Method in interface org.apache.hadoop.hbase.io.hfile.Cacheable
- Serializes its data into destination.
- serialize(ByteBuffer) -
Method in class org.apache.hadoop.hbase.io.hfile.HFileBlock
-
- serialize(KeyValue) -
Method in class org.apache.hadoop.hbase.mapreduce.KeyValueSerialization.KeyValueSerializer
-
- SERIALIZED_COMPARATOR_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.Comparator
-
- SERIALIZED_FILTER_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter
-
- serializeExtraInfo(ByteBuffer) -
Method in class org.apache.hadoop.hbase.io.hfile.HFileBlock
-
- serve() -
Method in class org.apache.hadoop.hbase.thrift.TBoundedThreadPoolServer
-
- server -
Variable in class org.apache.hadoop.hbase.executor.EventHandler
-
- server -
Variable in class org.apache.hadoop.hbase.master.AssignmentManager
-
- server -
Variable in class org.apache.hadoop.hbase.master.BulkAssigner
-
- Server - Interface in org.apache.hadoop.hbase
- Defines the set of shared functions implemented by HBase servers (Masters
and RegionServers).
- SERVER_CURRENT_TIME_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest
-
- SERVER_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo
-
- SERVER_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest
-
- SERVER_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest
-
- SERVER_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest
-
- SERVER_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer
-
- SERVER_INFO_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse
-
- SERVER_LOAD_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo
-
- SERVER_NAME_DESC -
Static variable in interface org.apache.hadoop.hbase.master.MetricsMasterSource
-
- SERVER_NAME_DESC -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- SERVER_NAME_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo
-
- SERVER_NAME_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition
-
- SERVER_NAME_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask
-
- SERVER_NAME_NAME -
Static variable in interface org.apache.hadoop.hbase.master.MetricsMasterSource
-
- SERVER_NAME_NAME -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- SERVER_QUALIFIER -
Static variable in class org.apache.hadoop.hbase.HConstants
- The server column qualifier
- SERVER_START_CODE_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest
-
- serverAdded(ServerName) -
Method in interface org.apache.hadoop.hbase.master.ServerListener
- The server has joined the cluster.
- ServerCommandLine - Class in org.apache.hadoop.hbase.util
- Base class for command lines that start up various HBase daemons.
- ServerCommandLine() -
Constructor for class org.apache.hadoop.hbase.util.ServerCommandLine
-
- serverHasMoreResults -
Variable in class org.apache.hadoop.hbase.client.RegionServerCallable
-
- serverHasMoreResultsContext -
Variable in class org.apache.hadoop.hbase.client.RegionServerCallable
-
- ServerListener - Interface in org.apache.hadoop.hbase.master
- Get notification of server events.
- ServerLoad - Class in org.apache.hadoop.hbase
- This class is used for exporting current state of load on a RegionServer.
- ServerLoad(ClusterStatusProtos.ServerLoad) -
Constructor for class org.apache.hadoop.hbase.ServerLoad
-
- serverLoad -
Variable in class org.apache.hadoop.hbase.ServerLoad
-
- ServerManager - Class in org.apache.hadoop.hbase.master
- The ServerManager class manages info about region servers.
- ServerManager(Server, MasterServices) -
Constructor for class org.apache.hadoop.hbase.master.ServerManager
- Constructor.
- serverManager -
Variable in class org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl
-
- ServerMetricsTmpl - Class in org.apache.hadoop.hbase.tmpl.regionserver
-
- ServerMetricsTmpl(TemplateManager) -
Constructor for class org.apache.hadoop.hbase.tmpl.regionserver.ServerMetricsTmpl
-
- ServerMetricsTmpl() -
Constructor for class org.apache.hadoop.hbase.tmpl.regionserver.ServerMetricsTmpl
-
- ServerMetricsTmpl.ImplData - Class in org.apache.hadoop.hbase.tmpl.regionserver
-
- ServerMetricsTmpl.ImplData() -
Constructor for class org.apache.hadoop.hbase.tmpl.regionserver.ServerMetricsTmpl.ImplData
-
- ServerMetricsTmpl.Intf - Interface in org.apache.hadoop.hbase.tmpl.regionserver
-
- ServerMetricsTmplImpl - Class in org.apache.hadoop.hbase.tmpl.regionserver
-
- ServerMetricsTmplImpl(TemplateManager, ServerMetricsTmpl.ImplData) -
Constructor for class org.apache.hadoop.hbase.tmpl.regionserver.ServerMetricsTmplImpl
-
- serverName -
Variable in class org.apache.hadoop.hbase.master.handler.ServerShutdownHandler
-
- ServerName - Class in org.apache.hadoop.hbase
- Instance of an HBase ServerName.
- serverName -
Variable in class org.apache.hadoop.hbase.thrift.generated.TRegionInfo
-
- SERVERNAME_PATTERN -
Static variable in class org.apache.hadoop.hbase.ServerName
-
- SERVERNAME_QUALIFIER -
Static variable in class org.apache.hadoop.hbase.HConstants
- The serverName column qualifier.
- SERVERNAME_SEPARATOR -
Static variable in class org.apache.hadoop.hbase.ServerName
- This character is used as separator between server hostname, port and
startcode.
- ServerNonceManager - Class in org.apache.hadoop.hbase.regionserver
- Implementation of nonce manager that stores nonces in a hash map and cleans them up after
some time; if nonce group/client ID is supplied, nonces are stored by client ID.
- ServerNonceManager(Configuration) -
Constructor for class org.apache.hadoop.hbase.regionserver.ServerNonceManager
-
- ServerNotRunningYetException - Exception in org.apache.hadoop.hbase.ipc
-
- ServerNotRunningYetException(String) -
Constructor for exception org.apache.hadoop.hbase.ipc.ServerNotRunningYetException
-
- serverOffline(ZooKeeperWatcher, ServerName) -
Method in class org.apache.hadoop.hbase.master.RegionStates
- A server is offline, all regions on it are dead.
- serverRemoved(ServerName) -
Method in interface org.apache.hadoop.hbase.master.ServerListener
- The server was removed from the cluster.
- ServerRpcController - Class in org.apache.hadoop.hbase.ipc
- Used for server-side protobuf RPC service invocations.
- ServerRpcController() -
Constructor for class org.apache.hadoop.hbase.ipc.ServerRpcController
-
- servers -
Variable in class org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl
-
- servers -
Variable in class org.apache.hadoop.hbase.tmpl.master.RegionServerListTmpl
-
- ServerShutdownHandler - Class in org.apache.hadoop.hbase.master.handler
- Process server shutdown.
- ServerShutdownHandler(Server, MasterServices, DeadServer, ServerName, boolean) -
Constructor for class org.apache.hadoop.hbase.master.handler.ServerShutdownHandler
-
- SERVERSTARTCODE_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest
-
- SERVERSTARTCODE_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest
-
- ServerStatistics - Class in org.apache.hadoop.hbase.client.backoff
- Track the statistics for a single region
- ServerStatistics() -
Constructor for class org.apache.hadoop.hbase.client.backoff.ServerStatistics
-
- ServerStatistics.RegionStatistics - Class in org.apache.hadoop.hbase.client.backoff
-
- ServerStatistics.RegionStatistics() -
Constructor for class org.apache.hadoop.hbase.client.backoff.ServerStatistics.RegionStatistics
-
- ServerStatisticTracker - Class in org.apache.hadoop.hbase.client
- Tracks the statistics for multiple regions
- ServerStatisticTracker() -
Constructor for class org.apache.hadoop.hbase.client.ServerStatisticTracker
-
- SERVERVERSION_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version
-
- service -
Variable in class org.apache.hadoop.hbase.regionserver.HRegionServer
-
- service(HttpServletRequest, HttpServletResponse) -
Method in class org.apache.hadoop.hbase.rest.RESTServletContainer
- This container is used only if authentication and
impersonation is enabled.
- SERVICE_CALL_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Action
-
- SERVICE_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.Token
-
- SERVICE_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.DelegationToken
-
- SERVICE_NAME_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall
-
- SERVICE_NAME_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader
-
- SERVICE_RESULT_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException
-
- services -
Variable in class org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer
-
- services -
Variable in class org.apache.hadoop.hbase.master.handler.LogReplayHandler
-
- services -
Variable in class org.apache.hadoop.hbase.master.handler.ServerShutdownHandler
-
- services -
Static variable in class org.apache.hadoop.hbase.security.HBasePolicyProvider
-
- set(TokenizerRowSearchPosition, TokenizerNode) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenizerRowSearchResult
-
- set(String, String) -
Method in class org.apache.hadoop.hbase.CompoundConfiguration
-
- set(byte[]) -
Method in class org.apache.hadoop.hbase.io.ImmutableBytesWritable
-
- set(byte[], int, int) -
Method in class org.apache.hadoop.hbase.io.ImmutableBytesWritable
-
- set(int) -
Method in interface org.apache.hadoop.hbase.util.ByteRange
- Reuse this
ByteRange over a new byte[].
- set(byte[]) -
Method in interface org.apache.hadoop.hbase.util.ByteRange
- Reuse this
ByteRange over a new byte[].
- set(byte[], int, int) -
Method in interface org.apache.hadoop.hbase.util.ByteRange
- Reuse this
ByteRange over a new byte[].
- set(int) -
Method in interface org.apache.hadoop.hbase.util.PositionedByteRange
-
- set(byte[]) -
Method in interface org.apache.hadoop.hbase.util.PositionedByteRange
-
- set(byte[], int, int) -
Method in interface org.apache.hadoop.hbase.util.PositionedByteRange
-
- set(int) -
Method in class org.apache.hadoop.hbase.util.SimpleByteRange
-
- set(byte[]) -
Method in class org.apache.hadoop.hbase.util.SimpleByteRange
-
- set(byte[], int, int) -
Method in class org.apache.hadoop.hbase.util.SimpleByteRange
-
- set(int) -
Method in class org.apache.hadoop.hbase.util.SimplePositionedByteRange
-
- set(byte[]) -
Method in class org.apache.hadoop.hbase.util.SimplePositionedByteRange
-
- set(byte[], int, int) -
Method in class org.apache.hadoop.hbase.util.SimplePositionedByteRange
-
- setACL(String, Permission) -
Method in class org.apache.hadoop.hbase.client.Mutation
-
- setACL(Map<String, Permission>) -
Method in class org.apache.hadoop.hbase.client.Mutation
-
- setACL(String, Permission) -
Method in class org.apache.hadoop.hbase.client.Query
-
- setACL(Map<String, Permission>) -
Method in class org.apache.hadoop.hbase.client.Query
-
- setAcl(String, List<ACL>, int) -
Method in class org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper
- setAcl is an idempotent operation.
- setACLStrategy(boolean) -
Method in class org.apache.hadoop.hbase.client.Mutation
- Deprecated. No effect
- setACLStrategy(boolean) -
Method in class org.apache.hadoop.hbase.client.Query
- Deprecated. No effect
- setAction(int, AccessControlProtos.Permission.Action) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.GlobalPermission.Builder
repeated .Permission.Action action = 1;
- setAction(int, AccessControlProtos.Permission.Action) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.NamespacePermission.Builder
repeated .Permission.Action action = 2;
- setAction(int, AccessControlProtos.Permission.Action) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.TablePermission.Builder
repeated .Permission.Action action = 4;
- setAction(int, ClientProtos.Action) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionAction.Builder
repeated .Action action = 3;
- setAction(int, ClientProtos.Action.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionAction.Builder
repeated .Action action = 3;
- setAdmin(HBaseAdmin) -
Method in class org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl.ImplData
-
- setAgeOfLastAppliedOp(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder
required uint64 ageOfLastAppliedOp = 1;
- setAgeOfLastAppliedOp(long) -
Method in class org.apache.hadoop.hbase.replication.regionserver.MetricsSink
- Set the age of the last applied operation
- setAgeOfLastShippedOp(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder
required uint64 ageOfLastShippedOp = 2;
- setAgeOfLastShippedOp(long) -
Method in class org.apache.hadoop.hbase.replication.regionserver.MetricsSource
- Set the age of the last edit that was shipped
- setAlgorithm(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.EncryptionProtos.WrappedKey.Builder
required string algorithm = 1;
- setAlgorithmBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.EncryptionProtos.WrappedKey.Builder
required string algorithm = 1;
- setAllowed(boolean) -
Method in class org.apache.hadoop.hbase.security.access.AuthResult
-
- setAllSameType(byte) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setAllSameType(boolean) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setAllTypes(byte) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setAmmount(long) -
Method in class org.apache.hadoop.hbase.thrift.generated.TIncrement
-
- setAmmountIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TIncrement
-
- setAmount(long) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumnIncrement
-
- setAmountIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumnIncrement
-
- setAppend(TAppend) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.append_args
- the TAppend to append
- setAppendIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.append_args
-
- setArchiveTables(List<String>) -
Method in class org.apache.hadoop.hbase.backup.example.HFileArchiveTableMonitor
- Set the tables to be archived.
- setArrayOffset(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setAssignmentManager(AssignmentManager) -
Method in class org.apache.hadoop.hbase.tmpl.master.AssignmentManagerStatusTmpl.ImplData
-
- setAssignmentManager(AssignmentManager) -
Method in class org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl.ImplData
-
- setAssignmentManager(AssignmentManager) -
Method in class org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl
-
- setAssignSeqNum(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.Builder
optional bool assign_seq_num = 3;
- setAssignSeqNum(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.SecureBulkLoadHFilesRequest.Builder
optional bool assign_seq_num = 2;
- setAssociatedCellCount(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.Builder
optional int32 associated_cell_count = 3;
- setAssociatedCellCount(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Builder
optional int32 associated_cell_count = 8;
- setAssociatedCellCount(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder
optional int32 associated_cell_count = 2;
- setAtomic(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionAction.Builder
optional bool atomic = 2;
- setAttribute(String, byte[]) -
Method in interface org.apache.hadoop.hbase.client.Attributes
- Sets an attribute.
- setAttribute(String, byte[]) -
Method in class org.apache.hadoop.hbase.client.OperationWithAttributes
-
- setAttribute(int, HBaseProtos.NameBytesPair) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.Builder
repeated .NameBytesPair attribute = 3;
- setAttribute(int, HBaseProtos.NameBytesPair.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.Builder
repeated .NameBytesPair attribute = 3;
- setAttribute(int, HBaseProtos.NameBytesPair) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Builder
repeated .NameBytesPair attribute = 5;
- setAttribute(int, HBaseProtos.NameBytesPair.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Builder
repeated .NameBytesPair attribute = 5;
- setAttribute(int, HBaseProtos.NameBytesPair) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder
repeated .NameBytesPair attribute = 2;
- setAttribute(int, HBaseProtos.NameBytesPair.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder
repeated .NameBytesPair attribute = 2;
- setAttributes(int, HBaseProtos.BytesBytesPair) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder
repeated .BytesBytesPair attributes = 2;
- setAttributes(int, HBaseProtos.BytesBytesPair.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder
repeated .BytesBytesPair attributes = 2;
- setAttributes(int, HBaseProtos.BytesBytesPair) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder
repeated .BytesBytesPair attributes = 2;
- setAttributes(int, HBaseProtos.BytesBytesPair.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder
repeated .BytesBytesPair attributes = 2;
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAll_args
- Delete attributes
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllRow_args
- Delete attributes
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllRowTs_args
- Delete attributes
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllTs_args
- Delete attributes
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.get_args
- Get attributes
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRow_args
- Get attributes
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRows_args
- Get attributes
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsTs_args
- Get attributes
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumns_args
- Get attributes
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumnsTs_args
- Get attributes
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowTs_args
- Get attributes
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumns_args
- Get attributes
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumnsTs_args
- Get attributes
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVer_args
- Get attributes
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVerTs_args
- Get attributes
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRow_args
- Mutation attributes
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRows_args
- Mutation attributes
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowsTs_args
- Mutation attributes
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowTs_args
- Mutation attributes
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpen_args
- Scan attributes
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenTs_args
- Scan attributes
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithPrefix_args
- Scan attributes
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithScan_args
- Scan attributes
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStop_args
- Scan attributes
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStopTs_args
- Scan attributes
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TAppend
-
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TDelete
-
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TGet
-
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TIncrement
-
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TPut
-
- setAttributes(Map<ByteBuffer, ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAll_args
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllRow_args
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllRowTs_args
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllTs_args
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.get_args
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRow_args
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRows_args
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsTs_args
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumns_args
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumnsTs_args
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowTs_args
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumns_args
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumnsTs_args
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVer_args
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVerTs_args
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRow_args
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRows_args
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowsTs_args
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowTs_args
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpen_args
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenTs_args
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithPrefix_args
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithScan_args
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStop_args
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStopTs_args
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TAppend
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TDelete
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TGet
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TIncrement
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TPut
-
- setAttributesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- setAttrs(int, ColumnSchemaMessage.ColumnSchema.Attribute) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
- setAttrs(int, ColumnSchemaMessage.ColumnSchema.Attribute.Builder) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema.Attribute attrs = 2;
- setAttrs(int, TableSchemaMessage.TableSchema.Attribute) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Builder
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
- setAttrs(int, TableSchemaMessage.TableSchema.Attribute.Builder) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Builder
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attribute attrs = 2;
- setAuth(int, ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.GetAuthsResponse.Builder
repeated bytes auth = 2;
- setAuth(int, ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.SetAuthsRequest.Builder
repeated bytes auth = 2;
- setAuth(int, int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.UserAuthorizations.Builder
repeated uint32 auth = 2;
- setAuthMethod(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.WhoAmIResponse.Builder
optional string auth_method = 2;
- setAuthMethodBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.WhoAmIResponse.Builder
optional string auth_method = 2;
- setAuthorizations(Authorizations) -
Method in class org.apache.hadoop.hbase.client.Query
- Sets the authorizations to be used by this Query
- setAuthorizations(TAuthorization) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TGet
-
- setAuthorizations(TAuthorization) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- setAuthorizationsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TGet
-
- setAuthorizationsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- setAuths(RpcController, VisibilityLabelsProtos.SetAuthsRequest) -
Method in interface org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService.BlockingInterface
-
- setAuths(RpcController, VisibilityLabelsProtos.SetAuthsRequest, RpcCallback<VisibilityLabelsProtos.VisibilityLabelsResponse>) -
Method in interface org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService.Interface
rpc setAuths(.SetAuthsRequest) returns (.VisibilityLabelsResponse);
- setAuths(RpcController, VisibilityLabelsProtos.SetAuthsRequest, RpcCallback<VisibilityLabelsProtos.VisibilityLabelsResponse>) -
Method in class org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService
rpc setAuths(.SetAuthsRequest) returns (.VisibilityLabelsResponse);
- setAuths(RpcController, VisibilityLabelsProtos.SetAuthsRequest, RpcCallback<VisibilityLabelsProtos.VisibilityLabelsResponse>) -
Method in class org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService.Stub
-
- setAuths(byte[], List<byte[]>) -
Method in class org.apache.hadoop.hbase.security.visibility.DefaultVisibilityLabelServiceImpl
-
- setAuths(Configuration, String[], String) -
Static method in class org.apache.hadoop.hbase.security.visibility.VisibilityClient
- Sets given labels globally authorized for the user.
- setAuths(RpcController, VisibilityLabelsProtos.SetAuthsRequest, RpcCallback<VisibilityLabelsProtos.VisibilityLabelsResponse>) -
Method in class org.apache.hadoop.hbase.security.visibility.VisibilityController
-
- setAuths(byte[], List<byte[]>) -
Method in interface org.apache.hadoop.hbase.security.visibility.VisibilityLabelService
- Sets given labels globally authorized for the user.
- setAutoFlush(boolean) -
Method in class org.apache.hadoop.hbase.client.HTable
- Deprecated.
- setAutoFlush(boolean, boolean) -
Method in class org.apache.hadoop.hbase.client.HTable
- Turns 'auto-flush' on or off.
- setAutoFlush(boolean) -
Method in interface org.apache.hadoop.hbase.client.HTableInterface
- Deprecated. in 0.96. When called with setAutoFlush(false), this function also
set clearBufferOnFail to true, which is unexpected but kept for historical reasons.
Replace it with setAutoFlush(false, false) if this is exactly what you want, or by
HTableInterface.setAutoFlushTo(boolean) for all other cases.
- setAutoFlush(boolean, boolean) -
Method in interface org.apache.hadoop.hbase.client.HTableInterface
- Turns 'auto-flush' on or off.
- setAutoFlush(boolean) -
Method in class org.apache.hadoop.hbase.rest.client.RemoteHTable
-
- setAutoFlush(boolean, boolean) -
Method in class org.apache.hadoop.hbase.rest.client.RemoteHTable
-
- setAutoFlushTo(boolean) -
Method in class org.apache.hadoop.hbase.client.HTable
- Set the autoFlush behavior, without changing the value of
clearBufferOnFail
- setAutoFlushTo(boolean) -
Method in interface org.apache.hadoop.hbase.client.HTableInterface
- Set the autoFlush behavior, without changing the value of
clearBufferOnFail
- setAutoFlushTo(boolean) -
Method in class org.apache.hadoop.hbase.rest.client.RemoteHTable
-
- setAverageLoad(double) -
Method in class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel
-
- setAverageLoad(double) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Builder
optional double averageLoad = 5;
- setAveragePrefixLength(int) -
Method in class org.apache.hadoop.hbase.util.test.RedundantKVGenerator
-
- setAverageQualifierLength(int) -
Method in class org.apache.hadoop.hbase.util.test.RedundantKVGenerator
-
- setAverageSuffixLength(int) -
Method in class org.apache.hadoop.hbase.util.test.RedundantKVGenerator
-
- setBackoffPolicy(RetryCounter.BackoffPolicy) -
Method in class org.apache.hadoop.hbase.util.RetryCounter.RetryConfig
-
- setBackupMasters(int, HBaseProtos.ServerName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder
repeated .ServerName backup_masters = 8;
- setBackupMasters(int, HBaseProtos.ServerName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder
repeated .ServerName backup_masters = 8;
- setBalancerOn(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder
optional bool balancer_on = 9;
- setBalancerOn(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.LoadBalancerProtos.LoadBalancerState.Builder
optional bool balancer_on = 1;
- setBalancerOn(boolean) -
Method in class org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker
- Set the balancer on/off
- setBalancerRan(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.Builder
required bool balancer_ran = 1;
- setBalancerRunning(boolean, boolean) -
Method in class org.apache.hadoop.hbase.client.HBaseAdmin
- Turn the load balancer on or off.
- setBalancerRunning(RpcController, MasterProtos.SetBalancerRunningRequest) -
Method in class org.apache.hadoop.hbase.master.HMaster
-
- setBalancerRunning(RpcController, MasterProtos.SetBalancerRunningRequest) -
Method in interface org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService.BlockingInterface
-
- setBalancerRunning(RpcController, MasterProtos.SetBalancerRunningRequest, RpcCallback<MasterProtos.SetBalancerRunningResponse>) -
Method in interface org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService.Interface
rpc SetBalancerRunning(.SetBalancerRunningRequest) returns (.SetBalancerRunningResponse);
- setBalancerRunning(RpcController, MasterProtos.SetBalancerRunningRequest, RpcCallback<MasterProtos.SetBalancerRunningResponse>) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService
rpc SetBalancerRunning(.SetBalancerRunningRequest) returns (.SetBalancerRunningResponse);
- setBalancerRunning(RpcController, MasterProtos.SetBalancerRunningRequest, RpcCallback<MasterProtos.SetBalancerRunningResponse>) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService.Stub
-
- setBaseTimestampDivide(int) -
Method in class org.apache.hadoop.hbase.util.test.RedundantKVGenerator
-
- setBatch(int) -
Method in class org.apache.hadoop.hbase.client.Scan
- Set the maximum number of values to return for each call to next()
- setBatch(int) -
Method in class org.apache.hadoop.hbase.rest.model.ScannerModel
-
- setBatch(int) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner.Builder
optional int32 batch = 4;
- setBatchSize(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder
optional uint32 batch_size = 9;
- setBatchSize(int) -
Method in class org.apache.hadoop.hbase.thrift.generated.TScan
-
- setBatchSize(int) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- setBatchSizeIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TScan
-
- setBatchSizeIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- setBcn(String) -
Method in class org.apache.hadoop.hbase.tmpl.regionserver.BlockCacheViewTmpl.ImplData
-
- setBcn(String) -
Method in class org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl.ImplData
-
- setBcn(String) -
Method in class org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl
-
- setBcv(String) -
Method in class org.apache.hadoop.hbase.tmpl.regionserver.BlockCacheViewTmpl.ImplData
-
- setBcv(String) -
Method in class org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl.ImplData
-
- setBcv(String) -
Method in class org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl
-
- setBigdecimalMsg(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg.Builder
required bytes bigdecimal_msg = 1;
- setBitwiseOp(ComparatorProtos.BitComparator.BitwiseOp) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.BitComparator.Builder
required .BitComparator.BitwiseOp bitwise_op = 2;
- setBlockCacheEnabled(boolean) -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
-
- setBlockCacheEnabled(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor
-
- setBlockCacheEnabledIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor
-
- setBlocksize(int) -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
-
- setBloomFilterNbHashes(int) -
Method in class org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor
-
- setBloomFilterNbHashesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor
-
- setBloomFilterType(BloomType) -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
-
- setBloomFilterType(String) -
Method in class org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor
-
- setBloomFilterTypeIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor
-
- setBloomFilterVectorSize(int) -
Method in class org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor
-
- setBloomFilterVectorSizeIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor
-
- setBody(byte[]) -
Method in class org.apache.hadoop.hbase.rest.client.Response
-
- setBufferOffset(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setBuilder(Tokenizer) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenizerNode
-
- setBulkLoaded(boolean) -
Method in class org.apache.hadoop.hbase.regionserver.StoreFile.Reader
-
- setBulkToken(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.CleanupBulkLoadRequest.Builder
required string bulk_token = 1;
- setBulkToken(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadResponse.Builder
required string bulk_token = 1;
- setBulkToken(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.SecureBulkLoadHFilesRequest.Builder
required string bulk_token = 4;
- setBulkTokenBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.CleanupBulkLoadRequest.Builder
required string bulk_token = 1;
- setBulkTokenBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadResponse.Builder
required string bulk_token = 1;
- setBulkTokenBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.SecureBulkLoadHFilesRequest.Builder
required string bulk_token = 4;
- setCacheBlocks(boolean) -
Method in class org.apache.hadoop.hbase.client.Get
- Set whether blocks should be cached for this Get.
- setCacheBlocks(boolean) -
Method in class org.apache.hadoop.hbase.client.Scan
- Set whether blocks should be cached for this Scan.
- setCacheBlocks(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.Builder
optional bool cache_blocks = 7 [default = true];
- setCacheBlocks(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder
optional bool cache_blocks = 8 [default = true];
- setCacheBlocks(boolean) -
Method in class org.apache.hadoop.hbase.rest.model.ScannerModel
-
- setCacheBlocks(boolean) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner.Builder
optional bool cacheBlocks = 11;
- setCacheBloomsOnWrite(boolean) -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
-
- setCacheConfig(CacheConfig) -
Method in class org.apache.hadoop.hbase.tmpl.regionserver.BlockCacheTmpl.ImplData
-
- setCacheConfig(CacheConfig) -
Method in class org.apache.hadoop.hbase.tmpl.regionserver.BlockCacheViewTmpl.ImplData
-
- setCacheDataOnWrite(boolean) -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
-
- setCacheDataOnWrite(boolean) -
Method in class org.apache.hadoop.hbase.io.hfile.CacheConfig
- Only used for testing.
- setCacheIndexesOnWrite(boolean) -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
-
- setCacheOff() -
Method in interface org.apache.hadoop.hbase.TableDescriptors
- Disables the tabledescriptor cache
- setCacheOff() -
Method in class org.apache.hadoop.hbase.util.FSTableDescriptors
-
- setCacheOn() -
Method in interface org.apache.hadoop.hbase.TableDescriptors
- Enables the tabledescriptor cache
- setCacheOn() -
Method in class org.apache.hadoop.hbase.util.FSTableDescriptors
-
- setCaching(int) -
Method in class org.apache.hadoop.hbase.client.Scan
- Set the number of rows for caching that will be passed to scanners.
- setCaching(int) -
Method in class org.apache.hadoop.hbase.client.ScannerCallable
- Set the number of rows that will be fetched on next
- setCaching(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder
optional uint32 caching = 17;
- setCaching(int) -
Method in class org.apache.hadoop.hbase.rest.model.ScannerModel
-
- setCaching(int) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner.Builder
optional int32 caching = 9;
- setCaching(int) -
Method in class org.apache.hadoop.hbase.thrift.generated.TScan
-
- setCaching(int) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- setCachingIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TScan
-
- setCachingIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- setCall(ClientProtos.CoprocessorServiceCall) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.Builder
required .CoprocessorServiceCall call = 2;
- setCall(ClientProtos.CoprocessorServiceCall.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.Builder
required .CoprocessorServiceCall call = 2;
- setCallId(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader.Builder
optional uint32 call_id = 1;
- setCallId(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ResponseHeader.Builder
optional uint32 call_id = 1;
- setCallQueueLen(int) -
Method in interface org.apache.hadoop.hbase.thrift.MetricsThriftServerSource
- Set the call queue length.
- setCallQueueLen(int) -
Method in class org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceImpl
-
- setCallQueueLen(int) -
Method in class org.apache.hadoop.hbase.thrift.ThriftMetrics
-
- setCatalogJanitorEnabled(boolean) -
Method in class org.apache.hadoop.hbase.master.HMaster
- Switch for the background CatalogJanitor thread.
- setCatalogJanitorEnabled(boolean) -
Method in class org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl.ImplData
-
- setCatalogJanitorEnabled(boolean) -
Method in class org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl
-
- setCell(int, CellProtos.Cell) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder
repeated .Cell cell = 1;
- setCell(int, CellProtos.Cell.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder
repeated .Cell cell = 1;
- setCell(TCell) -
Method in class org.apache.hadoop.hbase.thrift.generated.TColumn
-
- setCellBlockCodecClass(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader.Builder
optional string cell_block_codec_class = 3;
- setCellBlockCodecClassBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader.Builder
optional string cell_block_codec_class = 3;
- setCellBlockCompressorClass(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader.Builder
optional string cell_block_compressor_class = 4;
- setCellBlockCompressorClassBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader.Builder
optional string cell_block_compressor_class = 4;
- setCellBlockMeta(RPCProtos.CellBlockMeta) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader.Builder
optional .CellBlockMeta cell_block_meta = 5;
- setCellBlockMeta(RPCProtos.CellBlockMeta.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader.Builder
optional .CellBlockMeta cell_block_meta = 5;
- setCellBlockMeta(RPCProtos.CellBlockMeta) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ResponseHeader.Builder
optional .CellBlockMeta cell_block_meta = 3;
- setCellBlockMeta(RPCProtos.CellBlockMeta.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ResponseHeader.Builder
optional .CellBlockMeta cell_block_meta = 3;
- setCellCodecClsName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALHeader.Builder
optional string cell_codec_cls_name = 5;
- setCellCodecClsNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALHeader.Builder
optional string cell_codec_cls_name = 5;
- setCellIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TColumn
-
- setCellScanner(CellScanner) -
Method in class org.apache.hadoop.hbase.ipc.DelegatingPayloadCarryingRpcController
-
- setCellScanner(CellScanner) -
Method in class org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController
-
- setCellsPerResult(int, int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse.Builder
repeated uint32 cells_per_result = 1;
- setCellType(CellProtos.CellType) -
Method in class org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder
optional .CellType cell_type = 5;
- setCellVisibility(CellVisibility) -
Method in class org.apache.hadoop.hbase.client.Mutation
- Sets the visibility expression associated with cells in this Mutation.
- setCellVisibility(TCellVisibility) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TAppend
-
- setCellVisibility(TCellVisibility) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TIncrement
-
- setCellVisibility(TCellVisibility) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TPut
-
- setCellVisibilityIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TAppend
-
- setCellVisibilityIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TIncrement
-
- setCellVisibilityIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TPut
-
- setChance(float) -
Method in class org.apache.hadoop.hbase.filter.RandomRowFilter
- Set the chance that a row is included.
- setChance(float) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.RandomRowFilter.Builder
required float chance = 1;
- setChanceForSameQualifier(float) -
Method in class org.apache.hadoop.hbase.util.test.RedundantKVGenerator
-
- setChanceForSimilarQualifier(float) -
Method in class org.apache.hadoop.hbase.util.test.RedundantKVGenerator
-
- setChanceForZeroValue(float) -
Method in class org.apache.hadoop.hbase.util.test.RedundantKVGenerator
-
- setCharset(Charset) -
Method in class org.apache.hadoop.hbase.filter.RegexStringComparator
- Specifies the
Charset to use to convert the row key to a String.
- setCharset(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.RegexStringComparator.Builder
required string charset = 3;
- setCharsetBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.RegexStringComparator.Builder
required string charset = 3;
- setCheckExistenceOnly(boolean) -
Method in class org.apache.hadoop.hbase.client.Get
-
- setCheckHdfs(boolean) -
Method in class org.apache.hadoop.hbase.util.HBaseFsck
-
- setCipher(Cipher) -
Method in class org.apache.hadoop.hbase.io.crypto.Context
-
- setCipher(Cipher) -
Method in class org.apache.hadoop.hbase.io.crypto.Encryption.Context
-
- setClassName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.GenericExceptionMessage.Builder
optional string class_name = 1;
- setClassNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.GenericExceptionMessage.Builder
optional string class_name = 1;
- setClose() -
Method in class org.apache.hadoop.hbase.client.ScannerCallable
- Call this when the next invocation of call should close the scanner
- setClosed(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse.Builder
required bool closed = 1;
- setCloseScanner(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest.Builder
optional bool close_scanner = 5;
- setClosestRowBefore(boolean) -
Method in class org.apache.hadoop.hbase.client.Get
-
- setClosestRowBefore(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.Builder
optional bool closest_row_before = 11 [default = false];
- setCluster(Cluster) -
Method in class org.apache.hadoop.hbase.rest.client.Client
-
- setClusterDown() -
Method in class org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker
- Sets the cluster as down by deleting the znode.
- setClusterId(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.Builder
required string cluster_id = 1;
- setClusterId(ClusterIdProtos.ClusterId) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder
optional .ClusterId cluster_id = 5;
- setClusterId(ClusterIdProtos.ClusterId.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder
optional .ClusterId cluster_id = 5;
- setClusterId(HBaseProtos.UUID) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder
- Deprecated.
- setClusterId(HBaseProtos.UUID.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder
- Deprecated.
- setClusterId(FileSystem, Path, ClusterId, int) -
Static method in class org.apache.hadoop.hbase.util.FSUtils
- Writes a new unique identifier for this cluster to the "hbase.id" file
in the HBase root directory
- setClusterId(ZooKeeperWatcher, ClusterId) -
Static method in class org.apache.hadoop.hbase.zookeeper.ZKClusterId
-
- setClusterIdBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.Builder
required string cluster_id = 1;
- setClusterIds(List<UUID>) -
Method in class org.apache.hadoop.hbase.client.Mutation
- Marks that the clusters with the given clusterIds have consumed the mutation
- setClusterIds(int, HBaseProtos.UUID) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder
repeated .UUID cluster_ids = 8;
- setClusterIds(int, HBaseProtos.UUID.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder
repeated .UUID cluster_ids = 8;
- setClusterkey(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder
required string clusterkey = 1;
- setClusterKey(String) -
Method in class org.apache.hadoop.hbase.replication.ReplicationPeerConfig
- Set the clusterKey which is the concatenation of the slave cluster's:
hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
- setClusterkeyBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder
required string clusterkey = 1;
- setClusterStatus(ClusterStatus) -
Method in class org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer
-
- setClusterStatus(ClusterStatus) -
Method in class org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer
-
- setClusterStatus(ClusterStatus) -
Method in interface org.apache.hadoop.hbase.master.LoadBalancer
- Set the current cluster status.
- setClusterStatus(ClusterStatusProtos.ClusterStatus) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.Builder
required .ClusterStatus cluster_status = 1;
- setClusterStatus(ClusterStatusProtos.ClusterStatus.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse.Builder
required .ClusterStatus cluster_status = 1;
- setClusterUp() -
Method in class org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker
- Sets the cluster as up.
- setCode(int) -
Method in class org.apache.hadoop.hbase.rest.client.Response
-
- setColumn(int, ClientProtos.Column) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.Builder
repeated .Column column = 2;
- setColumn(int, ClientProtos.Column.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.Builder
repeated .Column column = 2;
- setColumn(int, ClientProtos.Column) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder
repeated .Column column = 1;
- setColumn(int, ClientProtos.Column.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder
repeated .Column column = 1;
- setColumn(byte[]) -
Method in class org.apache.hadoop.hbase.rest.model.CellModel
-
- setColumn(ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder
optional bytes column = 2;
- setColumn(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.atomicIncrement_args
- name of column
- setColumn(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.atomicIncrement_args
-
- setColumn(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAll_args
- name of column whose value is to be deleted
- setColumn(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAll_args
-
- setColumn(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllTs_args
- name of column whose value is to be deleted
- setColumn(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllTs_args
-
- setColumn(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.get_args
- column name
- setColumn(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.get_args
-
- setColumn(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVer_args
- column name
- setColumn(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVer_args
-
- setColumn(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVerTs_args
- column name
- setColumn(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVerTs_args
-
- setColumn(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Mutation
-
- setColumn(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Mutation
-
- setColumn(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.TIncrement
-
- setColumn(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.TIncrement
-
- setColumnFamilies(int, HBaseProtos.ColumnFamilySchema) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder
repeated .ColumnFamilySchema column_families = 3;
- setColumnFamilies(int, HBaseProtos.ColumnFamilySchema.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder
repeated .ColumnFamilySchema column_families = 3;
- setColumnFamilies(HBaseProtos.ColumnFamilySchema) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest.Builder
required .ColumnFamilySchema column_families = 2;
- setColumnFamilies(HBaseProtos.ColumnFamilySchema.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest.Builder
required .ColumnFamilySchema column_families = 2;
- setColumnFamilies(HBaseProtos.ColumnFamilySchema) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest.Builder
required .ColumnFamilySchema column_families = 2;
- setColumnFamilies(HBaseProtos.ColumnFamilySchema.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest.Builder
required .ColumnFamilySchema column_families = 2;
- setColumnFamilies(List<ColumnDescriptor>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.createTable_args
- list of column family descriptors
- setColumnFamiliesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.createTable_args
-
- setColumnFamily(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.DependentColumnFilter.Builder
optional bytes column_family = 2;
- setColumnFamily(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.SingleColumnValueFilter.Builder
optional bytes column_family = 1;
- setColumnFamilyLength(int) -
Method in class org.apache.hadoop.hbase.util.test.RedundantKVGenerator
-
- setColumnIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.atomicIncrement_args
-
- setColumnIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAll_args
-
- setColumnIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllTs_args
-
- setColumnIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.get_args
-
- setColumnIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVer_args
-
- setColumnIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVerTs_args
-
- setColumnIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Mutation
-
- setColumnIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TIncrement
-
- setColumnName(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest.Builder
required bytes column_name = 2;
- setColumnName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.TColumn
-
- setColumnName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.TColumn
-
- setColumnNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TColumn
-
- setColumnOffset(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.ColumnPaginationFilter.Builder
optional bytes column_offset = 3;
- setColumnQualifier(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.DependentColumnFilter.Builder
optional bytes column_qualifier = 3;
- setColumnQualifier(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.SingleColumnValueFilter.Builder
optional bytes column_qualifier = 2;
- setColumns(List<byte[]>) -
Method in class org.apache.hadoop.hbase.rest.model.ScannerModel
-
- setColumns(List<ColumnSchemaModel>) -
Method in class org.apache.hadoop.hbase.rest.model.TableSchemaModel
-
- setColumns(int, ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner.Builder
repeated bytes columns = 3;
- setColumns(int, ColumnSchemaMessage.ColumnSchema) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Builder
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
- setColumns(int, ColumnSchemaMessage.ColumnSchema.Builder) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Builder
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchema columns = 3;
- setColumns(List<ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumns_args
- List of columns to return, null for all columns
- setColumns(List<ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumnsTs_args
- List of columns to return, null for all columns
- setColumns(List<ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumns_args
- List of columns to return, null for all columns
- setColumns(List<ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumnsTs_args
- List of columns to return, null for all columns
- setColumns(List<ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpen_args
- columns to scan.
- setColumns(List<ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenTs_args
- columns to scan.
- setColumns(List<ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithPrefix_args
- the columns you want returned
- setColumns(List<ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStop_args
- columns to scan.
- setColumns(List<ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStopTs_args
- columns to scan.
- setColumns(Map<ByteBuffer, TCell>) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRowResult
-
- setColumns(List<ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.TScan
-
- setColumns(List<TColumnValue>) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TAppend
-
- setColumns(List<TColumn>) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TDelete
-
- setColumns(List<TColumn>) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TGet
-
- setColumns(List<TColumnIncrement>) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TIncrement
-
- setColumns(List<TColumn>) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- setColumnsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumns_args
-
- setColumnsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumnsTs_args
-
- setColumnsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumns_args
-
- setColumnsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumnsTs_args
-
- setColumnsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpen_args
-
- setColumnsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenTs_args
-
- setColumnsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithPrefix_args
-
- setColumnsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStop_args
-
- setColumnsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStopTs_args
-
- setColumnsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRowResult
-
- setColumnsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TScan
-
- setColumnsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TAppend
-
- setColumnsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TDelete
-
- setColumnsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TGet
-
- setColumnsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TIncrement
-
- setColumnsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- setColumnValue(int, ClientProtos.MutationProto.ColumnValue) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Builder
repeated .MutationProto.ColumnValue column_value = 3;
- setColumnValue(int, ClientProtos.MutationProto.ColumnValue.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Builder
repeated .MutationProto.ColumnValue column_value = 3;
- setColumnValues(List<TColumnValue>) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TPut
-
- setColumnValues(List<TColumnValue>) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TResult
-
- setColumnValuesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TPut
-
- setColumnValuesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TResult
-
- setCommonPrefix(byte[]) -
Method in class org.apache.hadoop.hbase.util.test.RedundantKVGenerator
- get/set
- setCompactionCompressionType(Compression.Algorithm) -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
- Compression types supported in hbase.
- setCompactionEnabled(boolean) -
Method in class org.apache.hadoop.hbase.HTableDescriptor
- Setting the table compaction enable flag.
- setCompactionInput(int, String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor.Builder
repeated string compaction_input = 4;
- setCompactionOutput(int, String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor.Builder
repeated string compaction_output = 5;
- setCompactionState(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest.Builder
optional bool compaction_state = 2;
- setCompactionState(AdminProtos.GetRegionInfoResponse.CompactionState) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.Builder
optional .GetRegionInfoResponse.CompactionState compaction_state = 2;
- setComparable(ComparatorProtos.ByteArrayComparable) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.BinaryComparator.Builder
required .ByteArrayComparable comparable = 1;
- setComparable(ComparatorProtos.ByteArrayComparable.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.BinaryComparator.Builder
required .ByteArrayComparable comparable = 1;
- setComparable(ComparatorProtos.ByteArrayComparable) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.BinaryPrefixComparator.Builder
required .ByteArrayComparable comparable = 1;
- setComparable(ComparatorProtos.ByteArrayComparable.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.BinaryPrefixComparator.Builder
required .ByteArrayComparable comparable = 1;
- setComparable(ComparatorProtos.ByteArrayComparable) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.BitComparator.Builder
required .ByteArrayComparable comparable = 1;
- setComparable(ComparatorProtos.ByteArrayComparable.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.BitComparator.Builder
required .ByteArrayComparable comparable = 1;
- setComparable(ComparatorProtos.ByteArrayComparable) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.LongComparator.Builder
required .ByteArrayComparable comparable = 1;
- setComparable(ComparatorProtos.ByteArrayComparable.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.LongComparator.Builder
required .ByteArrayComparable comparable = 1;
- setComparator(ComparatorProtos.Comparator) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition.Builder
required .Comparator comparator = 5;
- setComparator(ComparatorProtos.Comparator.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition.Builder
required .Comparator comparator = 5;
- setComparator(ComparatorProtos.Comparator) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.CompareFilter.Builder
optional .Comparator comparator = 2;
- setComparator(ComparatorProtos.Comparator.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.CompareFilter.Builder
optional .Comparator comparator = 2;
- setComparator(ComparatorProtos.Comparator) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.SingleColumnValueFilter.Builder
required .Comparator comparator = 4;
- setComparator(ComparatorProtos.Comparator.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.SingleColumnValueFilter.Builder
required .Comparator comparator = 4;
- setComparatorClass(Class<? extends KeyValue.KVComparator>) -
Method in class org.apache.hadoop.hbase.io.hfile.FixedFileTrailer
-
- setComparatorClassName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HFileProtos.FileTrailerProto.Builder
optional string comparator_class_name = 11;
- setComparatorClassNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HFileProtos.FileTrailerProto.Builder
optional string comparator_class_name = 11;
- setCompareFilter(FilterProtos.CompareFilter) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.DependentColumnFilter.Builder
required .CompareFilter compare_filter = 1;
- setCompareFilter(FilterProtos.CompareFilter.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.DependentColumnFilter.Builder
required .CompareFilter compare_filter = 1;
- setCompareFilter(FilterProtos.CompareFilter) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FamilyFilter.Builder
required .CompareFilter compare_filter = 1;
- setCompareFilter(FilterProtos.CompareFilter.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FamilyFilter.Builder
required .CompareFilter compare_filter = 1;
- setCompareFilter(FilterProtos.CompareFilter) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.QualifierFilter.Builder
required .CompareFilter compare_filter = 1;
- setCompareFilter(FilterProtos.CompareFilter.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.QualifierFilter.Builder
required .CompareFilter compare_filter = 1;
- setCompareFilter(FilterProtos.CompareFilter) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.RowFilter.Builder
required .CompareFilter compare_filter = 1;
- setCompareFilter(FilterProtos.CompareFilter.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.RowFilter.Builder
required .CompareFilter compare_filter = 1;
- setCompareFilter(FilterProtos.CompareFilter) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.ValueFilter.Builder
required .CompareFilter compare_filter = 1;
- setCompareFilter(FilterProtos.CompareFilter.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.ValueFilter.Builder
required .CompareFilter compare_filter = 1;
- setCompareOp(HBaseProtos.CompareType) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.CompareFilter.Builder
required .CompareType compare_op = 1;
- setCompareOp(HBaseProtos.CompareType) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.SingleColumnValueFilter.Builder
required .CompareType compare_op = 3;
- setCompareType(HBaseProtos.CompareType) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition.Builder
required .CompareType compare_type = 4;
- setCompleteSequenceId(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder
optional uint64 complete_sequence_id = 15;
- setCompression(Compression.Algorithm) -
Method in class org.apache.hadoop.hbase.io.hfile.HFileContext
-
- setCompression(String) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder
optional string compression = 5;
- setCompression(String) -
Method in class org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor
-
- setCompressionBytes(ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder
optional string compression = 5;
- setCompressionCodec(Compression.Algorithm) -
Method in class org.apache.hadoop.hbase.io.hfile.FixedFileTrailer
-
- setCompressionCodec(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HFileProtos.FileTrailerProto.Builder
optional uint32 compression_codec = 12;
- setCompressionContext(CompressionContext) -
Method in class org.apache.hadoop.hbase.regionserver.wal.HLog.Entry
- Set compression context for this entry.
- setCompressionContext(CompressionContext) -
Method in class org.apache.hadoop.hbase.regionserver.wal.HLogKey
-
- setCompressionContext(CompressionContext) -
Method in class org.apache.hadoop.hbase.regionserver.wal.WALEdit
-
- setCompressionIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor
-
- setCompressionType(Compression.Algorithm) -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
- Compression types supported in hbase.
- setCompressTags(boolean) -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
- Set whether the tags should be compressed along with DataBlockEncoding.
- setCompressTags(boolean) -
Method in class org.apache.hadoop.hbase.io.hfile.HFileContext
-
- setCondition(ClientProtos.Condition) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest.Builder
optional .Condition condition = 3;
- setCondition(ClientProtos.Condition.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest.Builder
optional .Condition condition = 3;
- setCondition(ClientProtos.Condition) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest.Builder
optional .Condition condition = 3;
- setCondition(ClientProtos.Condition.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest.Builder
optional .Condition condition = 3;
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.backup.example.LongTermArchivingHFileCleaner
-
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.BaseConfigurable
-
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.io.crypto.Context
-
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.io.crypto.DefaultCipherProvider
-
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.io.hfile.AbstractHFileReader
-
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.mapreduce.DefaultVisibilityExpressionResolver
-
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.mapreduce.GroupingTableMapper
- Sets the configuration.
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.mapreduce.HRegionPartitioner
- Sets the configuration.
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.mapreduce.MultiTableInputFormat
- Sets the configuration.
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.mapreduce.SimpleTotalOrderPartitioner
-
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.mapreduce.TableInputFormat
- Sets the configuration.
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.mapreduce.TableOutputFormat
-
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer
-
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.master.balancer.FavoredNodeLoadBalancer
-
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer
-
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
-
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
-
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
-
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
-
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.master.snapshot.SnapshotLogCleaner
- This method should only be called once, as it starts a thread to keep the cache
up-to-date.
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.migration.NamespaceUpgrade
-
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.regionserver.compactions.CompactionPolicy
- Inform the policy that some configuration has been changed,
so cached values should be updated, if any.
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.regionserver.compactions.PressureAwareCompactionThroughputController
-
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
-
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.security.visibility.DefaultVisibilityLabelServiceImpl
-
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.security.visibility.DefinedSetFilterScanLabelGenerator
-
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.security.visibility.EnforcingScanLabelGenerator
-
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.security.visibility.FeedUserAuthScanLabelGenerator
-
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.security.visibility.SimpleScanLabelGenerator
-
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.tmpl.regionserver.BlockCacheViewTmpl.ImplData
-
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.tool.Canary
-
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.util.AbstractHBaseTool
-
- setConf(Configuration) -
Method in class org.apache.hadoop.hbase.util.MapreduceDependencyClasspathTool
-
- setConfig(Configuration) -
Method in class org.apache.hadoop.hbase.tmpl.regionserver.BlockCacheTmpl.ImplData
-
- setConfiguration(HTableDescriptor, Class<? extends Constraint>, Configuration) -
Static method in class org.apache.hadoop.hbase.constraint.Constraints
- Update the configuration for the
Constraint; does not change the
order in which the constraint is run.
- setConfiguration(String, String) -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
- Setter for storing a configuration setting in
HColumnDescriptor.configuration map.
- setConfiguration(String, String) -
Method in class org.apache.hadoop.hbase.HTableDescriptor
- Setter for storing a configuration setting in
HTableDescriptor.configuration map.
- setConfiguration(String, String) -
Method in class org.apache.hadoop.hbase.NamespaceDescriptor
- Setter for storing a configuration setting in
NamespaceDescriptor.configuration map.
- setConfiguration(int, HBaseProtos.NameStringPair) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder
repeated .NameStringPair configuration = 3;
- setConfiguration(int, HBaseProtos.NameStringPair.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder
repeated .NameStringPair configuration = 3;
- setConfiguration(int, HBaseProtos.NameStringPair) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder
repeated .NameStringPair configuration = 2;
- setConfiguration(int, HBaseProtos.NameStringPair.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder
repeated .NameStringPair configuration = 2;
- setConfiguration(int, HBaseProtos.NameStringPair) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder
repeated .NameStringPair configuration = 4;
- setConfiguration(int, HBaseProtos.NameStringPair.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder
repeated .NameStringPair configuration = 4;
- setConfiguration(int, HBaseProtos.NameStringPair) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder
repeated .NameStringPair configuration = 4;
- setConfiguration(int, HBaseProtos.NameStringPair.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder
repeated .NameStringPair configuration = 4;
- setConfiguration(int, HBaseProtos.NameStringPair) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder
repeated .NameStringPair configuration = 4;
- setConfiguration(int, HBaseProtos.NameStringPair.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder
repeated .NameStringPair configuration = 4;
- setConfigure(Configuration) -
Static method in class org.apache.hadoop.hbase.replication.regionserver.ReplicationSyncUp
-
- setConflictWaitIterationMs(int) -
Method in class org.apache.hadoop.hbase.regionserver.ServerNonceManager
-
- setConnection(String, int) -
Method in interface org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler
-
- setConnection(String, int) -
Method in class org.apache.hadoop.hbase.monitoring.MonitoredRPCHandlerImpl
- Registers current handler client details.
- setContentLength(int) -
Method in class org.apache.hadoop.hbase.rest.filter.GZIPResponseWrapper
-
- setContext(String) -
Method in class org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry
- Set the metrics context tag
- setControllerException(RpcController, IOException) -
Static method in class org.apache.hadoop.hbase.protobuf.ResponseConverter
- Stores an exception encountered during RPC invocation so it can be passed back
through to the client.
- setCoprocessorHost(RegionCoprocessorHost) -
Method in class org.apache.hadoop.hbase.regionserver.HRegion
-
- setCoprocessors(int, HBaseProtos.Coprocessor) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder
repeated .Coprocessor coprocessors = 6;
- setCoprocessors(int, HBaseProtos.Coprocessor.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder
repeated .Coprocessor coprocessors = 6;
- setCorePoolSize(int) -
Method in class org.apache.hadoop.hbase.thrift.IncrementCoalescer
-
- setCorePoolSize(int) -
Method in interface org.apache.hadoop.hbase.thrift.IncrementCoalescerMBean
-
- setCount(long) -
Method in class org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse.Builder
required int64 count = 1 [default = 0];
- setCount(int) -
Method in class org.apache.hadoop.hbase.regionserver.ColumnCount
- Set the current count to a new count
- setCounter(String, long) -
Method in class org.apache.hadoop.hbase.client.metrics.ScanMetrics
-
- setCreateTime(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.Builder
required uint64 create_time = 3;
- setCreateTime(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.Builder
optional int64 create_time = 6;
- setCreationTime(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder
optional int64 creation_time = 3 [default = 0];
- setCreationTime(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder
optional int64 creation_time = 3 [default = 0];
- setCurrentBuffer(ByteBuffer) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeSeeker
-
- setCurrentBuffer(ByteBuffer) -
Method in interface org.apache.hadoop.hbase.io.encoding.DataBlockEncoder.EncodedSeeker
- Set on which buffer there will be done seeking.
- setCurrentCompactedKVs(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder
optional uint64 current_compacted_KVs = 11;
- setCurrentCompactedKVs(long) -
Method in class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel.Node.Region
-
- setCurrentCompactedKVs(long) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder
optional int64 currentCompactedKVs = 13;
- setDaemon(boolean) -
Method in class org.apache.hadoop.hbase.util.HasThread
-
- setDaemonThreadRunning(Thread) -
Static method in class org.apache.hadoop.hbase.util.Threads
- Utility method that sets name, daemon status and starts passed thread.
- setDaemonThreadRunning(Thread, String) -
Static method in class org.apache.hadoop.hbase.util.Threads
- Utility method that sets name, daemon status and starts passed thread.
- setDaemonThreadRunning(Thread, String, Thread.UncaughtExceptionHandler) -
Static method in class org.apache.hadoop.hbase.util.Threads
- Utility method that sets name, daemon status and starts passed thread.
- setData(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.EncryptionProtos.WrappedKey.Builder
required bytes data = 3;
- setData(int, HBaseProtos.BytesBytesPair) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder
repeated .BytesBytesPair data = 3;
- setData(int, HBaseProtos.BytesBytesPair.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder
repeated .BytesBytesPair data = 3;
- setData(ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder
optional bytes data = 4;
- setData(String, byte[], int) -
Method in class org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper
- setData is NOT an idempotent operation.
- setData(ZooKeeperWatcher, String, byte[], int) -
Static method in class org.apache.hadoop.hbase.zookeeper.ZKUtil
- Sets the data of the existing znode to be the specified data.
- setData(ZooKeeperWatcher, String, byte[]) -
Static method in class org.apache.hadoop.hbase.zookeeper.ZKUtil
- Sets the data of the existing znode to be the specified data.
- setData(String, byte[]) -
Static method in class org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp
-
- setDataBlockEncoding(DataBlockEncoding) -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
- Set data block encoding algorithm used in block cache.
- setDataBlockEncoding(DataBlockEncoding) -
Method in class org.apache.hadoop.hbase.io.hfile.HFileContext
-
- setDataIndexCount(int) -
Method in class org.apache.hadoop.hbase.io.hfile.FixedFileTrailer
-
- setDataIndexCount(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HFileProtos.FileTrailerProto.Builder
optional uint32 data_index_count = 5;
- setDataLocality(float) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder
optional float data_locality = 16;
- setDate(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder
required string date = 5;
- setDateBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder
required string date = 5;
- setDeadNodes(List<String>) -
Method in class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel
-
- setDeadNodes(int, String) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Builder
repeated string deadNodes = 2;
- setDeadServers(int, HBaseProtos.ServerName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder
repeated .ServerName dead_servers = 3;
- setDeadServers(int, HBaseProtos.ServerName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder
repeated .ServerName dead_servers = 3;
- setDeadServers(Set<ServerName>) -
Method in class org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl.ImplData
-
- setDeadServers(Set<ServerName>) -
Method in class org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl
-
- setDeclaringClass(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.StackTraceElementMessage.Builder
optional string declaring_class = 1;
- setDeclaringClassBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.StackTraceElementMessage.Builder
optional string declaring_class = 1;
- setDefaultClientPort(int) -
Method in class org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster
-
- setDeferredLogFlush(boolean) -
Method in class org.apache.hadoop.hbase.HTableDescriptor
- Deprecated.
- setDeletedTable(TableName) -
Method in class org.apache.hadoop.hbase.zookeeper.ZKTable
- Deletes the table in zookeeper.
- setDeleteFamilyBloomFilterFaulty() -
Method in class org.apache.hadoop.hbase.regionserver.StoreFile.Reader
-
- setDeletes(List<TDelete>) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.deleteMultiple_args
- list of TDeletes to delete
- setDeleteSingle(TDelete) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndDelete_args
- the TDelete to execute if the check succeeds
- setDeleteSingle(TDelete) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.deleteSingle_args
- the TDelete to delete
- setDeleteSingle(TDelete) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TMutation
-
- setDeleteSingleIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndDelete_args
-
- setDeleteSingleIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.deleteSingle_args
-
- setDeletesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.deleteMultiple_args
-
- setDeleteType(BulkDeleteProtos.BulkDeleteRequest.DeleteType) -
Method in class org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.Builder
required .BulkDeleteRequest.DeleteType deleteType = 2;
- setDeleteType(ClientProtos.MutationProto.DeleteType) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue.Builder
optional .MutationProto.DeleteType delete_type = 4;
- setDeleteType(TDeleteType) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TDelete
-
- setDeleteTypeIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TDelete
-
- setDescription(String) -
Method in interface org.apache.hadoop.hbase.monitoring.MonitoredTask
-
- setDescription(String, String) -
Method in class org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest
- Sets the region/store name, for logging.
- setDestination(ServerName) -
Method in class org.apache.hadoop.hbase.master.RegionPlan
- Set the destination server for the plan for this region.
- setDestinationServer(HBaseProtos.ServerName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest.Builder
optional .ServerName destination_server = 4;
- setDestinationServer(HBaseProtos.ServerName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest.Builder
optional .ServerName destination_server = 4;
- setDestServerName(HBaseProtos.ServerName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest.Builder
optional .ServerName dest_server_name = 2;
- setDestServerName(HBaseProtos.ServerName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest.Builder
optional .ServerName dest_server_name = 2;
- setDisabledTable(TableName) -
Method in class org.apache.hadoop.hbase.zookeeper.ZKTable
- Sets the specified table as DISABLED in zookeeper.
- setDisablingTable(TableName) -
Method in class org.apache.hadoop.hbase.zookeeper.ZKTable
- Sets the specified table as DISABLING in zookeeper.
- setDisplayFullReport() -
Static method in class org.apache.hadoop.hbase.util.HBaseFsck
- Display the full report from fsck.
- setDone(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.Builder
optional bool done = 1 [default = false];
- setDone(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse.Builder
optional bool done = 1 [default = false];
- setDone(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.Builder
optional bool done = 1 [default = false];
- setDoNotRetry(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ExceptionResponse.Builder
optional bool do_not_retry = 5;
- setDoubleMsg(double) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.DoubleMsg.Builder
required double double_msg = 1;
- setDropDependentColumn(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.DependentColumnFilter.Builder
optional bool drop_dependent_column = 4;
- setDummyHeader(byte[]) -
Method in class org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext
-
- setDummyHeader(byte[]) -
Method in interface org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext
- sets the dummy header bytes
- setDurability(Durability) -
Method in class org.apache.hadoop.hbase.client.Mutation
- Set the durability for this mutation
- setDurability(Durability) -
Method in class org.apache.hadoop.hbase.HTableDescriptor
- Sets the
Durability setting for the table.
- setDurability(ClientProtos.MutationProto.Durability) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Builder
optional .MutationProto.Durability durability = 6 [default = USE_DEFAULT];
- setDurability(TDurability) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TAppend
-
- setDurability(TDurability) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TDelete
-
- setDurability(TDurability) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TIncrement
-
- setDurability(TDurability) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TPut
-
- setDurabilityIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TAppend
-
- setDurabilityIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TDelete
-
- setDurabilityIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TIncrement
-
- setDurabilityIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TPut
-
- setEffectiveUser(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder
required string effective_user = 1;
- setEffectiveUser(String) -
Method in class org.apache.hadoop.hbase.util.ConnectionCache
- Set the current thread local effective user
- setEffectiveUserBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder
required string effective_user = 1;
- setEnable(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.Builder
required bool enable = 1;
- setEnabled(boolean) -
Method in class org.apache.hadoop.hbase.master.CatalogJanitor
-
- setEnabled(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.Builder
required bool enabled = 1;
- setEnabledTable(TableName) -
Method in class org.apache.hadoop.hbase.master.AssignmentManager
-
- setEnabledTable(TableName) -
Method in class org.apache.hadoop.hbase.zookeeper.ZKTable
- Sets the ENABLED state in the cache and creates or force updates a node to
ENABLED state for the specified table
- setEnablingTable(TableName) -
Method in class org.apache.hadoop.hbase.zookeeper.ZKTable
- Sets the specified table as ENABLING in zookeeper.
- setEncodedRegionName(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor.Builder
required bytes encoded_region_name = 2;
- setEncodedRegionName(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder
required bytes encoded_region_name = 1;
- setEncodeOnDisk(boolean) -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
- Deprecated.
- setEncryptionContext(Encryption.Context) -
Method in class org.apache.hadoop.hbase.io.hfile.HFileContext
-
- setEncryptionKey(byte[]) -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
- Set the raw crypto key attribute for the family
- setEncryptionKey(byte[]) -
Method in class org.apache.hadoop.hbase.io.hfile.FixedFileTrailer
-
- setEncryptionKey(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HFileProtos.FileTrailerProto.Builder
optional bytes encryption_key = 13;
- setEncryptionKey(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALHeader.Builder
optional bytes encryption_key = 2;
- setEncryptionType(String) -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
- Set the encryption algorithm for use with this family
- setEndKey(Configuration, byte[]) -
Static method in class org.apache.hadoop.hbase.mapreduce.SimpleTotalOrderPartitioner
-
- setEndKey(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder
optional bytes end_key = 4;
- setEndKey(byte[]) -
Method in class org.apache.hadoop.hbase.rest.model.TableRegionModel
-
- setEndKey(ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder
optional bytes endKey = 3;
- setEndKey(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRegionInfo
-
- setEndKey(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRegionInfo
-
- setEndKeyIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRegionInfo
-
- setEndRow(byte[]) -
Method in class org.apache.hadoop.hbase.mapred.TableRecordReader
- Deprecated.
- setEndRow(byte[]) -
Method in class org.apache.hadoop.hbase.mapred.TableRecordReaderImpl
- Deprecated.
- setEndRow(byte[]) -
Method in class org.apache.hadoop.hbase.rest.model.ScannerModel
-
- setEndRow(ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner.Builder
optional bytes endRow = 2;
- setEndTime(long) -
Method in class org.apache.hadoop.hbase.rest.model.ScannerModel
-
- setEndTime(long) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner.Builder
optional int64 endTime = 6;
- setEndTime(long) -
Method in class org.apache.hadoop.hbase.rest.RowSpec
-
- setEngine(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.RegexStringComparator.Builder
optional string engine = 4;
- setEngineBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.RegexStringComparator.Builder
optional string engine = 4;
- setEntries(List<HLog.Entry>) -
Method in class org.apache.hadoop.hbase.replication.ReplicationEndpoint.ReplicateContext
-
- setEntry(int, AdminProtos.WALEntry) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.Builder
repeated .WALEntry entry = 1;
- setEntry(int, AdminProtos.WALEntry.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.Builder
repeated .WALEntry entry = 1;
- setEntryCount(long) -
Method in class org.apache.hadoop.hbase.io.hfile.FixedFileTrailer
-
- setEntryCount(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HFileProtos.FileTrailerProto.Builder
optional uint64 entry_count = 7;
- setErr(Throwable) -
Method in class org.apache.hadoop.hbase.regionserver.handler.ParallelSeekHandler
-
- setErrorHandler(HBaseRPCErrorHandler) -
Method in class org.apache.hadoop.hbase.ipc.RpcServer
- Set the handler for calling out of RPC for error conditions.
- setErrorHandler(HBaseRPCErrorHandler) -
Method in interface org.apache.hadoop.hbase.ipc.RpcServerInterface
-
- setErrorInfo(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.GenericExceptionMessage.Builder
optional bytes error_info = 3;
- setErrorMessage(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse.Builder
optional string error_message = 1;
- setErrorMessage(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest.Builder
required string error_message = 2;
- setErrorMessageBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse.Builder
optional string error_message = 1;
- setErrorMessageBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest.Builder
required string error_message = 2;
- setEventTypeCode(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.Builder
required uint32 event_type_code = 1;
- setEvictBlocksOnClose(boolean) -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
-
- setEvictOnClose(boolean) -
Method in class org.apache.hadoop.hbase.io.hfile.CacheConfig
- Only used for testing.
- setException(IOException) -
Method in class org.apache.hadoop.hbase.ipc.RpcClient.Call
- Set the exception when there is an error.
- setException(HBaseProtos.NameBytesPair) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult.Builder
optional .NameBytesPair exception = 2;
- setException(HBaseProtos.NameBytesPair.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult.Builder
optional .NameBytesPair exception = 2;
- setException(HBaseProtos.NameBytesPair) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException.Builder
optional .NameBytesPair exception = 3;
- setException(HBaseProtos.NameBytesPair.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException.Builder
optional .NameBytesPair exception = 3;
- setException(RPCProtos.ExceptionResponse) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ResponseHeader.Builder
optional .ExceptionResponse exception = 2;
- setException(RPCProtos.ExceptionResponse.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ResponseHeader.Builder
optional .ExceptionResponse exception = 2;
- setExceptionClassName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ExceptionResponse.Builder
optional string exception_class_name = 1;
- setExceptionClassNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ExceptionResponse.Builder
optional string exception_class_name = 1;
- setExist(AlreadyExists) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.createTable_result
-
- setExistenceOnly(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.Builder
optional bool existence_only = 10 [default = false];
- setExistIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.createTable_result
-
- setExists(Boolean) -
Method in class org.apache.hadoop.hbase.client.Result
-
- setExists(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder
optional bool exists = 3;
- setExpectedTimeout(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.Builder
required int64 expected_timeout = 1;
- setExpectedTimeout(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.Builder
required int64 expected_timeout = 1;
- setExpiration(long) -
Method in class org.apache.hadoop.hbase.security.token.AuthenticationKey
-
- setExpirationDate(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.AuthenticationKey.Builder
required int64 expiration_date = 2;
- setExpirationDate(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.TokenIdentifier.Builder
optional int64 expiration_date = 5;
- setExpression(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CellVisibility.Builder
required string expression = 1;
- setExpression(String) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TCellVisibility
-
- setExpressionBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CellVisibility.Builder
required string expression = 1;
- setExpressionIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TCellVisibility
-
- setFailed(String) -
Method in class org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController
-
- setFailed(String) -
Method in class org.apache.hadoop.hbase.ipc.ServerRpcController
-
- setFailedOn(IOException) -
Method in class org.apache.hadoop.hbase.ipc.ServerRpcController
- Sets an exception to be communicated back to the
Service client.
- setFakeLookupMode(boolean) -
Static method in class org.apache.hadoop.hbase.util.ByteBloomFilter
-
- setFamily(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.TablePermission.Builder
optional bytes family = 2;
- setFamily(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest.Builder
optional bytes family = 3;
- setFamily(int, ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest.Builder
repeated bytes family = 2;
- setFamily(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder
optional bytes family = 2;
- setFamily(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.CellProtos.KeyValue.Builder
required bytes family = 2;
- setFamily(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath.Builder
required bytes family = 1;
- setFamily(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column.Builder
required bytes family = 1;
- setFamily(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition.Builder
required bytes family = 2;
- setFamily(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.Builder
required bytes family = 1;
- setFamily(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.FamilyScope.Builder
required bytes family = 1;
- setFamily(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowOrBefore_args
- column name
- setFamily(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowOrBefore_args
-
- setFamily(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumn
-
- setFamily(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumn
-
- setFamily(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumnIncrement
-
- setFamily(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumnIncrement
-
- setFamily(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumnValue
-
- setFamily(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumnValue
-
- setFamily(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndDelete_args
- column family to check
- setFamily(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndDelete_args
-
- setFamily(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndPut_args
- column family to check
- setFamily(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndPut_args
-
- setFamily(byte[]) -
Method in class org.apache.hadoop.hbase.util.test.RedundantKVGenerator
-
- setFamilyCellMap(NavigableMap<byte[], List<Cell>>) -
Method in class org.apache.hadoop.hbase.client.Mutation
- Method for setting the put's familyMap
- setFamilyFiles(int, SnapshotProtos.SnapshotRegionManifest.FamilyFiles) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
- setFamilyFiles(int, SnapshotProtos.SnapshotRegionManifest.FamilyFiles.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder
repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
- setFamilyIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowOrBefore_args
-
- setFamilyIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumn
-
- setFamilyIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumnIncrement
-
- setFamilyIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumnValue
-
- setFamilyIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndDelete_args
-
- setFamilyIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndPut_args
-
- setFamilyMap(NavigableMap<byte[], List<KeyValue>>) -
Method in class org.apache.hadoop.hbase.client.Mutation
- Deprecated. use
Mutation.setFamilyCellMap(NavigableMap) instead.
- setFamilyMap(Map<byte[], NavigableSet<byte[]>>) -
Method in class org.apache.hadoop.hbase.client.Scan
- Setting the familyMap
- setFamilyName(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.Builder
required bytes family_name = 1;
- setFamilyName(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor.Builder
required bytes family_name = 3;
- setFamilyName(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder
required bytes family_name = 1;
- setFamilyOffsetWidth(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setFamilyPath(int, ClientProtos.BulkLoadHFileRequest.FamilyPath) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.Builder
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
- setFamilyPath(int, ClientProtos.BulkLoadHFileRequest.FamilyPath.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.Builder
repeated .BulkLoadHFileRequest.FamilyPath family_path = 2;
- setFamilyPath(int, ClientProtos.BulkLoadHFileRequest.FamilyPath) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.SecureBulkLoadHFilesRequest.Builder
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
- setFamilyPath(int, ClientProtos.BulkLoadHFileRequest.FamilyPath.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.SecureBulkLoadHFilesRequest.Builder
repeated .BulkLoadHFileRequest.FamilyPath family_path = 1;
- setFanIndex(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.decode.row.RowNodeReader
-
- setFavoredNode(int, HBaseProtos.ServerName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.Builder
repeated .ServerName favored_node = 1;
- setFavoredNode(int, HBaseProtos.ServerName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.Builder
repeated .ServerName favored_node = 1;
- setFavoredNodes(int, HBaseProtos.ServerName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder
repeated .ServerName favored_nodes = 3;
- setFavoredNodes(int, HBaseProtos.ServerName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder
repeated .ServerName favored_nodes = 3;
- setFavoredNodes(int, HBaseProtos.ServerName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo.Builder
repeated .ServerName favored_nodes = 2;
- setFavoredNodes(int, HBaseProtos.ServerName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo.Builder
repeated .ServerName favored_nodes = 2;
- setFieldValue(AlreadyExists._Fields, Object) -
Method in exception org.apache.hadoop.hbase.thrift.generated.AlreadyExists
-
- setFieldValue(BatchMutation._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.BatchMutation
-
- setFieldValue(ColumnDescriptor._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor
-
- setFieldValue(Hbase.atomicIncrement_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.atomicIncrement_args
-
- setFieldValue(Hbase.atomicIncrement_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.atomicIncrement_result
-
- setFieldValue(Hbase.compact_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.compact_args
-
- setFieldValue(Hbase.compact_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.compact_result
-
- setFieldValue(Hbase.createTable_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.createTable_args
-
- setFieldValue(Hbase.createTable_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.createTable_result
-
- setFieldValue(Hbase.deleteAll_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAll_args
-
- setFieldValue(Hbase.deleteAll_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAll_result
-
- setFieldValue(Hbase.deleteAllRow_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllRow_args
-
- setFieldValue(Hbase.deleteAllRow_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllRow_result
-
- setFieldValue(Hbase.deleteAllRowTs_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllRowTs_args
-
- setFieldValue(Hbase.deleteAllRowTs_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllRowTs_result
-
- setFieldValue(Hbase.deleteAllTs_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllTs_args
-
- setFieldValue(Hbase.deleteAllTs_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllTs_result
-
- setFieldValue(Hbase.deleteTable_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteTable_args
-
- setFieldValue(Hbase.deleteTable_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteTable_result
-
- setFieldValue(Hbase.disableTable_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.disableTable_args
-
- setFieldValue(Hbase.disableTable_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.disableTable_result
-
- setFieldValue(Hbase.enableTable_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.enableTable_args
-
- setFieldValue(Hbase.enableTable_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.enableTable_result
-
- setFieldValue(Hbase.get_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.get_args
-
- setFieldValue(Hbase.get_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.get_result
-
- setFieldValue(Hbase.getColumnDescriptors_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getColumnDescriptors_args
-
- setFieldValue(Hbase.getColumnDescriptors_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getColumnDescriptors_result
-
- setFieldValue(Hbase.getRegionInfo_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRegionInfo_args
-
- setFieldValue(Hbase.getRegionInfo_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRegionInfo_result
-
- setFieldValue(Hbase.getRow_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRow_args
-
- setFieldValue(Hbase.getRow_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRow_result
-
- setFieldValue(Hbase.getRowOrBefore_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowOrBefore_args
-
- setFieldValue(Hbase.getRowOrBefore_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowOrBefore_result
-
- setFieldValue(Hbase.getRows_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRows_args
-
- setFieldValue(Hbase.getRows_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRows_result
-
- setFieldValue(Hbase.getRowsTs_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsTs_args
-
- setFieldValue(Hbase.getRowsTs_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsTs_result
-
- setFieldValue(Hbase.getRowsWithColumns_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumns_args
-
- setFieldValue(Hbase.getRowsWithColumns_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumns_result
-
- setFieldValue(Hbase.getRowsWithColumnsTs_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumnsTs_args
-
- setFieldValue(Hbase.getRowsWithColumnsTs_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumnsTs_result
-
- setFieldValue(Hbase.getRowTs_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowTs_args
-
- setFieldValue(Hbase.getRowTs_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowTs_result
-
- setFieldValue(Hbase.getRowWithColumns_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumns_args
-
- setFieldValue(Hbase.getRowWithColumns_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumns_result
-
- setFieldValue(Hbase.getRowWithColumnsTs_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumnsTs_args
-
- setFieldValue(Hbase.getRowWithColumnsTs_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumnsTs_result
-
- setFieldValue(Hbase.getTableNames_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getTableNames_args
-
- setFieldValue(Hbase.getTableNames_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getTableNames_result
-
- setFieldValue(Hbase.getTableRegions_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getTableRegions_args
-
- setFieldValue(Hbase.getTableRegions_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getTableRegions_result
-
- setFieldValue(Hbase.getVer_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVer_args
-
- setFieldValue(Hbase.getVer_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVer_result
-
- setFieldValue(Hbase.getVerTs_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVerTs_args
-
- setFieldValue(Hbase.getVerTs_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVerTs_result
-
- setFieldValue(Hbase.increment_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.increment_args
-
- setFieldValue(Hbase.increment_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.increment_result
-
- setFieldValue(Hbase.incrementRows_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.incrementRows_args
-
- setFieldValue(Hbase.incrementRows_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.incrementRows_result
-
- setFieldValue(Hbase.isTableEnabled_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.isTableEnabled_args
-
- setFieldValue(Hbase.isTableEnabled_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.isTableEnabled_result
-
- setFieldValue(Hbase.majorCompact_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.majorCompact_args
-
- setFieldValue(Hbase.majorCompact_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.majorCompact_result
-
- setFieldValue(Hbase.mutateRow_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRow_args
-
- setFieldValue(Hbase.mutateRow_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRow_result
-
- setFieldValue(Hbase.mutateRows_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRows_args
-
- setFieldValue(Hbase.mutateRows_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRows_result
-
- setFieldValue(Hbase.mutateRowsTs_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowsTs_args
-
- setFieldValue(Hbase.mutateRowsTs_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowsTs_result
-
- setFieldValue(Hbase.mutateRowTs_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowTs_args
-
- setFieldValue(Hbase.mutateRowTs_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowTs_result
-
- setFieldValue(Hbase.scannerClose_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerClose_args
-
- setFieldValue(Hbase.scannerClose_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerClose_result
-
- setFieldValue(Hbase.scannerGet_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerGet_args
-
- setFieldValue(Hbase.scannerGet_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerGet_result
-
- setFieldValue(Hbase.scannerGetList_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerGetList_args
-
- setFieldValue(Hbase.scannerGetList_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerGetList_result
-
- setFieldValue(Hbase.scannerOpen_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpen_args
-
- setFieldValue(Hbase.scannerOpen_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpen_result
-
- setFieldValue(Hbase.scannerOpenTs_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenTs_args
-
- setFieldValue(Hbase.scannerOpenTs_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenTs_result
-
- setFieldValue(Hbase.scannerOpenWithPrefix_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithPrefix_args
-
- setFieldValue(Hbase.scannerOpenWithPrefix_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithPrefix_result
-
- setFieldValue(Hbase.scannerOpenWithScan_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithScan_args
-
- setFieldValue(Hbase.scannerOpenWithScan_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithScan_result
-
- setFieldValue(Hbase.scannerOpenWithStop_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStop_args
-
- setFieldValue(Hbase.scannerOpenWithStop_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStop_result
-
- setFieldValue(Hbase.scannerOpenWithStopTs_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStopTs_args
-
- setFieldValue(Hbase.scannerOpenWithStopTs_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStopTs_result
-
- setFieldValue(IllegalArgument._Fields, Object) -
Method in exception org.apache.hadoop.hbase.thrift.generated.IllegalArgument
-
- setFieldValue(IOError._Fields, Object) -
Method in exception org.apache.hadoop.hbase.thrift.generated.IOError
-
- setFieldValue(Mutation._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.Mutation
-
- setFieldValue(TCell._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.TCell
-
- setFieldValue(TColumn._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.TColumn
-
- setFieldValue(TIncrement._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.TIncrement
-
- setFieldValue(TRegionInfo._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRegionInfo
-
- setFieldValue(TRowResult._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRowResult
-
- setFieldValue(TScan._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift.generated.TScan
-
- setFieldValue(TAppend._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TAppend
-
- setFieldValue(TAuthorization._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TAuthorization
-
- setFieldValue(TCellVisibility._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TCellVisibility
-
- setFieldValue(TColumn._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumn
-
- setFieldValue(TColumnIncrement._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumnIncrement
-
- setFieldValue(TColumnValue._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumnValue
-
- setFieldValue(TDelete._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TDelete
-
- setFieldValue(TGet._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TGet
-
- setFieldValue(THBaseService.append_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.append_args
-
- setFieldValue(THBaseService.append_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.append_result
-
- setFieldValue(THBaseService.checkAndDelete_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndDelete_args
-
- setFieldValue(THBaseService.checkAndDelete_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndDelete_result
-
- setFieldValue(THBaseService.checkAndPut_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndPut_args
-
- setFieldValue(THBaseService.checkAndPut_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndPut_result
-
- setFieldValue(THBaseService.closeScanner_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.closeScanner_args
-
- setFieldValue(THBaseService.closeScanner_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.closeScanner_result
-
- setFieldValue(THBaseService.deleteMultiple_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.deleteMultiple_args
-
- setFieldValue(THBaseService.deleteMultiple_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.deleteMultiple_result
-
- setFieldValue(THBaseService.deleteSingle_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.deleteSingle_args
-
- setFieldValue(THBaseService.deleteSingle_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.deleteSingle_result
-
- setFieldValue(THBaseService.exists_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.exists_args
-
- setFieldValue(THBaseService.exists_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.exists_result
-
- setFieldValue(THBaseService.get_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.get_args
-
- setFieldValue(THBaseService.get_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.get_result
-
- setFieldValue(THBaseService.getMultiple_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getMultiple_args
-
- setFieldValue(THBaseService.getMultiple_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getMultiple_result
-
- setFieldValue(THBaseService.getScannerResults_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerResults_args
-
- setFieldValue(THBaseService.getScannerResults_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerResults_result
-
- setFieldValue(THBaseService.getScannerRows_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerRows_args
-
- setFieldValue(THBaseService.getScannerRows_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerRows_result
-
- setFieldValue(THBaseService.increment_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.increment_args
-
- setFieldValue(THBaseService.increment_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.increment_result
-
- setFieldValue(THBaseService.mutateRow_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.mutateRow_args
-
- setFieldValue(THBaseService.mutateRow_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.mutateRow_result
-
- setFieldValue(THBaseService.openScanner_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.openScanner_args
-
- setFieldValue(THBaseService.openScanner_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.openScanner_result
-
- setFieldValue(THBaseService.put_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.put_args
-
- setFieldValue(THBaseService.put_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.put_result
-
- setFieldValue(THBaseService.putMultiple_args._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.putMultiple_args
-
- setFieldValue(THBaseService.putMultiple_result._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.putMultiple_result
-
- setFieldValue(TIllegalArgument._Fields, Object) -
Method in exception org.apache.hadoop.hbase.thrift2.generated.TIllegalArgument
-
- setFieldValue(TIncrement._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TIncrement
-
- setFieldValue(TIOError._Fields, Object) -
Method in exception org.apache.hadoop.hbase.thrift2.generated.TIOError
-
- setFieldValue(TPut._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TPut
-
- setFieldValue(TResult._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TResult
-
- setFieldValue(TRowMutations._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TRowMutations
-
- setFieldValue(TScan._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- setFieldValue(TTimeRange._Fields, Object) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TTimeRange
-
- setFileInfoOffset(long) -
Method in class org.apache.hadoop.hbase.io.hfile.FixedFileTrailer
-
- setFileInfoOffset(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HFileProtos.FileTrailerProto.Builder
optional uint64 file_info_offset = 1;
- setFileName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.StackTraceElementMessage.Builder
optional string file_name = 3;
- setFileNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.StackTraceElementMessage.Builder
optional string file_name = 3;
- setFileSize(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.Builder
optional uint64 file_size = 3;
- setFilter(Filter) -
Method in class org.apache.hadoop.hbase.client.Get
-
- setFilter(Filter) -
Method in class org.apache.hadoop.hbase.client.Query
- Apply the specified server-side filter when performing the Query.
- setFilter(Filter) -
Method in class org.apache.hadoop.hbase.client.Scan
-
- setFilter(FilterProtos.Filter) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.Builder
optional .Filter filter = 4;
- setFilter(FilterProtos.Filter.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.Builder
optional .Filter filter = 4;
- setFilter(FilterProtos.Filter) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder
optional .Filter filter = 5;
- setFilter(FilterProtos.Filter.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder
optional .Filter filter = 5;
- setFilter(FilterProtos.Filter) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterWrapper.Builder
required .Filter filter = 1;
- setFilter(FilterProtos.Filter.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterWrapper.Builder
required .Filter filter = 1;
- setFilter(FilterProtos.Filter) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.SkipFilter.Builder
required .Filter filter = 1;
- setFilter(FilterProtos.Filter.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.SkipFilter.Builder
required .Filter filter = 1;
- setFilter(FilterProtos.Filter) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.WhileMatchFilter.Builder
required .Filter filter = 1;
- setFilter(FilterProtos.Filter.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.WhileMatchFilter.Builder
required .Filter filter = 1;
- setFilter(String) -
Method in class org.apache.hadoop.hbase.rest.model.ScannerModel
-
- setFilter(String) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner.Builder
optional string filter = 8;
- setFilter(String) -
Method in class org.apache.hadoop.hbase.tmpl.common.TaskMonitorTmpl.ImplData
-
- setFilter(String) -
Method in class org.apache.hadoop.hbase.tmpl.common.TaskMonitorTmpl
-
- setFilter(String) -
Method in class org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl.ImplData
-
- setFilter(String) -
Method in class org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl
-
- setFilter(String) -
Method in class org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl.ImplData
-
- setFilter(String) -
Method in class org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl
-
- setFilterBytes(ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner.Builder
optional string filter = 8;
- setFilterIfMissing(boolean) -
Method in class org.apache.hadoop.hbase.filter.SingleColumnValueFilter
- Set whether entire row should be filtered if column is not found.
- setFilterIfMissing(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.SingleColumnValueFilter.Builder
optional bool filter_if_missing = 5;
- setFilters(int, FilterProtos.Filter) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterList.Builder
repeated .Filter filters = 2;
- setFilters(int, FilterProtos.Filter.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterList.Builder
repeated .Filter filters = 2;
- setFilterString(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.TScan
-
- setFilterString(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.TScan
-
- setFilterString(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TGet
-
- setFilterString(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TGet
-
- setFilterString(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- setFilterString(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- setFilterStringIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TScan
-
- setFilterStringIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TGet
-
- setFilterStringIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- setFirst(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder
required bytes first = 1;
- setFirst(T1) -
Method in class org.apache.hadoop.hbase.util.Pair
- Replace the first element of the pair.
- setFirst(A) -
Method in class org.apache.hadoop.hbase.util.Triple
-
- setFirstDataBlockOffset(long) -
Method in class org.apache.hadoop.hbase.io.hfile.FixedFileTrailer
-
- setFirstDataBlockOffset(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HFileProtos.FileTrailerProto.Builder
optional uint64 first_data_block_offset = 9;
- setFirstInsertionIndex(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenizerNode
-
- setFirstPart(int, ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.Builder
repeated bytes first_part = 1;
- setFirstRow(String) -
Method in class org.apache.hadoop.hbase.util.RegionSplitter.HexStringSplit
-
- setFirstRow(byte[]) -
Method in class org.apache.hadoop.hbase.util.RegionSplitter.HexStringSplit
-
- setFirstRow(String) -
Method in interface org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm
- In HBase, the last row is represented by an empty byte array.
- setFirstRow(byte[]) -
Method in interface org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm
- Set the first row
- setFirstRow(String) -
Method in class org.apache.hadoop.hbase.util.RegionSplitter.UniformSplit
-
- setFirstRow(byte[]) -
Method in class org.apache.hadoop.hbase.util.RegionSplitter.UniformSplit
-
- setFixAssignments(boolean) -
Method in class org.apache.hadoop.hbase.util.HBaseFsck
- Fix inconsistencies found by fsck.
- setFixEmptyMetaCells(boolean) -
Method in class org.apache.hadoop.hbase.util.HBaseFsck
-
- setFixHdfsHoles(boolean) -
Method in class org.apache.hadoop.hbase.util.HBaseFsck
-
- setFixHdfsOrphans(boolean) -
Method in class org.apache.hadoop.hbase.util.HBaseFsck
-
- setFixHdfsOverlaps(boolean) -
Method in class org.apache.hadoop.hbase.util.HBaseFsck
-
- setFixMeta(boolean) -
Method in class org.apache.hadoop.hbase.util.HBaseFsck
-
- setFixReferenceFiles(boolean) -
Method in class org.apache.hadoop.hbase.util.HBaseFsck
-
- setFixSplitParents(boolean) -
Method in class org.apache.hadoop.hbase.util.HBaseFsck
-
- setFixTableLocks(boolean) -
Method in class org.apache.hadoop.hbase.util.HBaseFsck
- Set table locks fix mode.
- setFixTableOrphans(boolean) -
Method in class org.apache.hadoop.hbase.util.HBaseFsck
-
- setFixTableZNodes(boolean) -
Method in class org.apache.hadoop.hbase.util.HBaseFsck
- Set orphaned table ZNodes fix mode.
- setFixVersionFile(boolean) -
Method in class org.apache.hadoop.hbase.util.HBaseFsck
-
- setFlushed(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionResponse.Builder
optional bool flushed = 2;
- setFollowingKvCount(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder
optional uint32 following_kv_count = 7;
- setForce(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest.Builder
optional bool force = 2 [default = false];
- setForcible(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest.Builder
optional bool forcible = 3 [default = false];
- setForcible(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest.Builder
optional bool forcible = 3 [default = false];
- setFormat(String) -
Method in class org.apache.hadoop.hbase.tmpl.common.TaskMonitorTmpl.ImplData
-
- setFormat(String) -
Method in class org.apache.hadoop.hbase.tmpl.common.TaskMonitorTmpl
-
- setFormat(String) -
Method in class org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl.ImplData
-
- setFormat(String) -
Method in class org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl
-
- setFormat(String) -
Method in class org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl.ImplData
-
- setFormat(String) -
Method in class org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl
-
- setFoundKV(boolean) -
Method in class org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter
-
- setFrags(Map<String, Integer>) -
Method in class org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl.ImplData
-
- setFrags(Map<String, Integer>) -
Method in class org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl
-
- setFrom(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder
optional uint64 from = 1;
- setFsDefault(Configuration, Path) -
Static method in class org.apache.hadoop.hbase.util.FSUtils
-
- setFsToken(SecureBulkLoadProtos.DelegationToken) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.SecureBulkLoadHFilesRequest.Builder
required .DelegationToken fs_token = 3;
- setFsToken(SecureBulkLoadProtos.DelegationToken.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.SecureBulkLoadHFilesRequest.Builder
required .DelegationToken fs_token = 3;
- setFuzzyKeysData(int, HBaseProtos.BytesBytesPair) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FuzzyRowFilter.Builder
repeated .BytesBytesPair fuzzy_keys_data = 1;
- setFuzzyKeysData(int, HBaseProtos.BytesBytesPair.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FuzzyRowFilter.Builder
repeated .BytesBytesPair fuzzy_keys_data = 1;
- setGauge(String, long) -
Method in interface org.apache.hadoop.hbase.metrics.BaseSource
- Set a gauge to a specific value.
- setGauge(String, long) -
Method in class org.apache.hadoop.hbase.metrics.BaseSourceImpl
- Set a single gauge to a value.
- setGeneralBloomFilterFaulty() -
Method in class org.apache.hadoop.hbase.regionserver.StoreFile.Reader
-
- setGenericException(ErrorHandlingProtos.GenericExceptionMessage) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder
optional .GenericExceptionMessage generic_exception = 2;
- setGenericException(ErrorHandlingProtos.GenericExceptionMessage.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder
optional .GenericExceptionMessage generic_exception = 2;
- setGet(ClientProtos.Get) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Action.Builder
optional .Get get = 3;
- setGet(ClientProtos.Get.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Action.Builder
optional .Get get = 3;
- setGet(ClientProtos.Get) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest.Builder
required .Get get = 2;
- setGet(ClientProtos.Get.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest.Builder
required .Get get = 2;
- setGet(TGet) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.exists_args
- the TGet to check for
- setGet(TGet) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.get_args
- the TGet to fetch
- setGetIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.exists_args
-
- setGetIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.get_args
-
- setGets(List<TGet>) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getMultiple_args
- a list of TGets to fetch, the Result list
will have the Results at corresponding positions
or null if there was an error
- setGetsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getMultiple_args
-
- setGlobalPermission(AccessControlProtos.GlobalPermission) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.Permission.Builder
optional .GlobalPermission global_permission = 2;
- setGlobalPermission(AccessControlProtos.GlobalPermission.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.Permission.Builder
optional .GlobalPermission global_permission = 2;
- setHasCompression(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALHeader.Builder
optional bool has_compression = 1;
- setHash(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.EncryptionProtos.WrappedKey.Builder
optional bytes hash = 5;
- setHasMoreResultsContext(boolean) -
Method in class org.apache.hadoop.hbase.client.RegionServerCallable
-
- setHasTagCompression(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALHeader.Builder
optional bool has_tag_compression = 3;
- setHbaseVersion(FSProtos.HBaseVersionFileContent) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder
optional .HBaseVersionFileContent hbase_version = 1;
- setHbaseVersion(FSProtos.HBaseVersionFileContent.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder
optional .HBaseVersionFileContent hbase_version = 1;
- setHeader(String, String) -
Method in class org.apache.hadoop.hbase.rest.filter.GZIPResponseWrapper
-
- setHeaders(Header[]) -
Method in class org.apache.hadoop.hbase.rest.client.Response
-
- setHeapOccupancy(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.Builder
optional int32 heapOccupancy = 2 [default = 0];
- setHeapSizeMB(int) -
Method in class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel.Node
-
- setHeapSizeMB(int) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder
optional int32 heapSizeMB = 4;
- setHfile(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Builder
optional string hfile = 3;
- setHfileBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Builder
optional string hfile = 3;
- setHFileCorruptionChecker(HFileCorruptionChecker) -
Method in class org.apache.hadoop.hbase.util.HBaseFsck
-
- setHostName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder
required string host_name = 1;
- setHostname(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ExceptionResponse.Builder
optional string hostname = 3;
- setHostNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder
required string host_name = 1;
- setHostnameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ExceptionResponse.Builder
optional string hostname = 3;
- setHTable(HTable) -
Method in class org.apache.hadoop.hbase.mapred.TableInputFormatBase
- Deprecated. Allows subclasses to set the
HTable.
- setHTable(HTable) -
Method in class org.apache.hadoop.hbase.mapred.TableRecordReader
- Deprecated.
- setHTable(HTable) -
Method in class org.apache.hadoop.hbase.mapred.TableRecordReaderImpl
- Deprecated.
- setHTable(HTable) -
Method in class org.apache.hadoop.hbase.mapreduce.TableInputFormatBase
- Allows subclasses to set the
HTable.
- setHTable(HTable) -
Method in class org.apache.hadoop.hbase.mapreduce.TableRecordReader
- Sets the HBase table.
- setHTable(HTable) -
Method in class org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl
- Sets the HBase table.
- setIa(IllegalArgument) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.atomicIncrement_result
-
- setIa(IllegalArgument) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.createTable_result
-
- setIa(IllegalArgument) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRow_result
-
- setIa(IllegalArgument) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRows_result
-
- setIa(IllegalArgument) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowsTs_result
-
- setIa(IllegalArgument) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowTs_result
-
- setIa(IllegalArgument) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerClose_result
-
- setIa(IllegalArgument) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerGet_result
-
- setIa(IllegalArgument) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerGetList_result
-
- setIa(TIllegalArgument) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.closeScanner_result
- if the scannerId is invalid
- setIa(TIllegalArgument) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerRows_result
- if the scannerId is invalid
- setIaIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.atomicIncrement_result
-
- setIaIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.createTable_result
-
- setIaIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRow_result
-
- setIaIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRows_result
-
- setIaIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowsTs_result
-
- setIaIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowTs_result
-
- setIaIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerClose_result
-
- setIaIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerGet_result
-
- setIaIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerGetList_result
-
- setIaIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.closeScanner_result
-
- setIaIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerRows_result
-
- setId(String) -
Method in class org.apache.hadoop.hbase.client.OperationWithAttributes
- This method allows you to set an identifier on an operation.
- setId(long) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenizerNode
-
- setId(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.AuthenticationKey.Builder
required int32 id = 1;
- setId(long) -
Method in class org.apache.hadoop.hbase.rest.model.TableRegionModel
-
- setId(long) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder
optional int64 id = 4;
- setId(int) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerClose_args
- id of a scanner returned by scannerOpen
- setId(int) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerGet_args
- id of a scanner returned by scannerOpen
- setId(int) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerGetList_args
- id of a scanner returned by scannerOpen
- setId(long) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRegionInfo
-
- setIdentifier(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.Token.Builder
optional bytes identifier = 1;
- setIdentifier(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.DelegationToken.Builder
optional bytes identifier = 1;
- setIdIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerClose_args
-
- setIdIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerGet_args
-
- setIdIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerGetList_args
-
- setIdIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRegionInfo
-
- setIfOlderThanTs(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest.Builder
optional uint64 if_older_than_ts = 2;
- setIgnorePreCheckPermission(boolean) -
Method in class org.apache.hadoop.hbase.util.HBaseFsck
-
- setIncludesMvcc(boolean) -
Method in class org.apache.hadoop.hbase.io.hfile.HFileContext
-
- setIncludesMvccVersion(byte) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setIncludesMvccVersion(boolean) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setIncludesTags(boolean) -
Method in class org.apache.hadoop.hbase.io.hfile.HFileContext
-
- setIncrement(TIncrement) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.increment_args
- The single increment to apply
- setIncrement(TIncrement) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.increment_args
- the TIncrement to increment
- setIncrementIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.increment_args
-
- setIncrementIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.increment_args
-
- setIncrements(List<TIncrement>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.incrementRows_args
- The list of increments
- setIncrementsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.incrementRows_args
-
- setIndex(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Action.Builder
optional uint32 index = 1;
- setIndex(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException.Builder
optional uint32 index = 1;
- setInfoFamilyCachingForMeta(HTableDescriptor, boolean) -
Static method in class org.apache.hadoop.hbase.master.MasterFileSystem
- Enable in memory caching for hbase:meta
- setInfoPort(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo.Builder
optional int32 infoPort = 1;
- setInfoServerPort(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder
optional uint32 info_server_port = 9;
- setInMemory(boolean) -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
-
- setInMemory(boolean) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Builder
optional bool inMemory = 4;
- setInMemory(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor
-
- setInMemoryIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor
-
- setInput(JobConf, String, Path) -
Static method in class org.apache.hadoop.hbase.mapred.TableSnapshotInputFormat
- Configures the job to use TableSnapshotInputFormat to read from a snapshot.
- setInput(Job, String, Path) -
Static method in class org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat
- Configures the job to use TableSnapshotInputFormat to read from a snapshot.
- setInput(Configuration, String, Path) -
Static method in class org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl
- Configures the job to use TableSnapshotInputFormat to read from a snapshot.
- setInputColumns(byte[][]) -
Method in class org.apache.hadoop.hbase.mapred.TableInputFormatBase
- Deprecated.
- setInputColumns(byte[][]) -
Method in class org.apache.hadoop.hbase.mapred.TableRecordReader
- Deprecated.
- setInputColumns(byte[][]) -
Method in class org.apache.hadoop.hbase.mapred.TableRecordReaderImpl
- Deprecated.
- setInsertionIndexes(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenizerNode
-
- setInstance(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder
optional string instance = 2;
- setInstanceBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder
optional string instance = 2;
- setInterpreterClassName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateRequest.Builder
required string interpreter_class_name = 1;
- setInterpreterClassNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateRequest.Builder
required string interpreter_class_name = 1;
- setInterpreterSpecificBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateRequest.Builder
optional bytes interpreter_specific_bytes = 3;
- setIntHeader(String, int) -
Method in class org.apache.hadoop.hbase.rest.filter.GZIPResponseWrapper
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.atomicIncrement_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.compact_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.createTable_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAll_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllRow_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllRowTs_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllTs_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteTable_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.disableTable_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.enableTable_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.get_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getColumnDescriptors_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRegionInfo_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRow_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowOrBefore_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRows_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsTs_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumns_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumnsTs_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowTs_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumns_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumnsTs_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getTableNames_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getTableRegions_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVer_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVerTs_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.increment_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.incrementRows_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.isTableEnabled_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.majorCompact_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRow_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRows_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowsTs_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowTs_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerClose_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerGet_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerGetList_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpen_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenTs_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithPrefix_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithScan_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStop_result
-
- setIo(IOError) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStopTs_result
-
- setIo(TIOError) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.append_result
-
- setIo(TIOError) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndDelete_result
-
- setIo(TIOError) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndPut_result
-
- setIo(TIOError) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.closeScanner_result
-
- setIo(TIOError) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.deleteMultiple_result
-
- setIo(TIOError) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.deleteSingle_result
-
- setIo(TIOError) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.exists_result
-
- setIo(TIOError) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.get_result
-
- setIo(TIOError) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getMultiple_result
-
- setIo(TIOError) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerResults_result
-
- setIo(TIOError) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerRows_result
-
- setIo(TIOError) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.increment_result
-
- setIo(TIOError) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.mutateRow_result
-
- setIo(TIOError) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.openScanner_result
-
- setIo(TIOError) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.put_result
-
- setIo(TIOError) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.putMultiple_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.atomicIncrement_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.compact_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.createTable_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAll_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllRow_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllRowTs_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllTs_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteTable_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.disableTable_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.enableTable_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.get_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getColumnDescriptors_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRegionInfo_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRow_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowOrBefore_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRows_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsTs_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumns_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumnsTs_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowTs_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumns_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumnsTs_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getTableNames_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getTableRegions_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVer_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVerTs_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.increment_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.incrementRows_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.isTableEnabled_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.majorCompact_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRow_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRows_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowsTs_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowTs_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerClose_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerGet_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerGetList_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpen_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenTs_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithPrefix_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithScan_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStop_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStopTs_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.append_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndDelete_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndPut_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.closeScanner_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.deleteMultiple_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.deleteSingle_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.exists_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.get_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getMultiple_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerResults_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerRows_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.increment_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.mutateRow_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.openScanner_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.put_result
-
- setIoIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.putMultiple_result
-
- setIsDelete(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Mutation
-
- setIsDeleteIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Mutation
-
- setIsMajor(boolean) -
Method in class org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest
- Specify if this compaction should be a major compaction based on the state of the store
- setIsMasterRunning(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.Builder
required bool is_master_running = 1;
- setIsolationLevel(IsolationLevel) -
Method in class org.apache.hadoop.hbase.client.Query
- Set the isolation level for this query.
- setIsRecovering(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.Builder
optional bool isRecovering = 3;
- setIsShared(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.Builder
optional bool is_shared = 4;
- setIssueDate(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.TokenIdentifier.Builder
optional int64 issue_date = 4;
- setIv(byte[]) -
Method in class org.apache.hadoop.hbase.io.crypto.aes.AESDecryptor
-
- setIv(byte[]) -
Method in class org.apache.hadoop.hbase.io.crypto.aes.AESEncryptor
-
- setIv(byte[]) -
Method in interface org.apache.hadoop.hbase.io.crypto.Decryptor
- Set the initialization vector
- setIv(byte[]) -
Method in interface org.apache.hadoop.hbase.io.crypto.Encryptor
- Set the initialization vector
- setIv(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.EncryptionProtos.WrappedKey.Builder
optional bytes iv = 4;
- setJerseyVersion(String) -
Method in class org.apache.hadoop.hbase.rest.model.VersionModel
-
- setJerseyVersion(String) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version.Builder
optional string jerseyVersion = 5;
- setJerseyVersionBytes(ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version.Builder
optional string jerseyVersion = 5;
- setJVMVersion(String) -
Method in class org.apache.hadoop.hbase.rest.model.VersionModel
-
- setJvmVersion(String) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version.Builder
optional string jvmVersion = 2;
- setJvmVersionBytes(ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version.Builder
optional string jvmVersion = 2;
- setKeepDeletedCells(boolean) -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
- Deprecated. use
HColumnDescriptor.setKeepDeletedCells(KeepDeletedCells)
- setKeepDeletedCells(KeepDeletedCells) -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
-
- setKey(Key) -
Method in class org.apache.hadoop.hbase.io.crypto.aes.AESDecryptor
-
- setKey(Key) -
Method in class org.apache.hadoop.hbase.io.crypto.aes.AESEncryptor
-
- setKey(Key) -
Method in class org.apache.hadoop.hbase.io.crypto.Context
-
- setKey(Key) -
Method in interface org.apache.hadoop.hbase.io.crypto.Decryptor
- Set the secret key
- setKey(Key) -
Method in class org.apache.hadoop.hbase.io.crypto.Encryption.Context
-
- setKey(byte[]) -
Method in class org.apache.hadoop.hbase.io.crypto.Encryption.Context
-
- setKey(Key) -
Method in interface org.apache.hadoop.hbase.io.crypto.Encryptor
- Set the secret key
- setKey(WALProtos.WALKey) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.Builder
required .WALKey key = 1;
- setKey(WALProtos.WALKey.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.Builder
required .WALKey key = 1;
- setKey(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.AuthenticationKey.Builder
required bytes key = 3;
- setKey(byte[]) -
Method in class org.apache.hadoop.hbase.rest.model.RowModel
-
- setKey(ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder
required bytes key = 1;
- setKeyId(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.TokenIdentifier.Builder
required int32 key_id = 3;
- setKeyType(CellProtos.CellType) -
Method in class org.apache.hadoop.hbase.protobuf.generated.CellProtos.KeyValue.Builder
optional .CellType key_type = 5;
- setKeyValueBytes(int, ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.Builder
repeated bytes key_value_bytes = 2;
- setKind(AuthenticationProtos.TokenIdentifier.Kind) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.TokenIdentifier.Builder
required .TokenIdentifier.Kind kind = 1;
- setKind(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.DelegationToken.Builder
optional string kind = 3;
- setKindBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.DelegationToken.Builder
optional string kind = 3;
- setLabel(int, String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Authorizations.Builder
repeated string label = 1;
- setLabel(int, ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.ListLabelsResponse.Builder
repeated bytes label = 1;
- setLabel(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabel.Builder
required bytes label = 1;
- setLabels(int, String) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner.Builder
repeated string labels = 10;
- setLabels(List<String>) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TAuthorization
-
- setLabelsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TAuthorization
-
- setLastAppliedOpAge(long) -
Method in interface org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSinkSource
-
- setLastAppliedOpAge(long) -
Method in class org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSinkSourceImpl
-
- setLastContact(long) -
Method in class org.apache.hadoop.hbase.ipc.RpcServer.Connection
-
- setLastDataBlockOffset(long) -
Method in class org.apache.hadoop.hbase.io.hfile.FixedFileTrailer
-
- setLastDataBlockOffset(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HFileProtos.FileTrailerProto.Builder
optional uint64 last_data_block_offset = 10;
- setLastFlushedSequenceId(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.Builder
required uint64 last_flushed_sequence_id = 1;
- setLastFlushedSequenceId(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds.Builder
required uint64 last_flushed_sequence_id = 1;
- setLastFlushTime(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionResponse.Builder
required uint64 last_flush_time = 1;
- setLastRow(String) -
Method in class org.apache.hadoop.hbase.util.RegionSplitter.HexStringSplit
-
- setLastRow(byte[]) -
Method in class org.apache.hadoop.hbase.util.RegionSplitter.HexStringSplit
-
- setLastRow(String) -
Method in interface org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm
- In HBase, the last row is represented by an empty byte array.
- setLastRow(byte[]) -
Method in interface org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm
- Set the last row
- setLastRow(String) -
Method in class org.apache.hadoop.hbase.util.RegionSplitter.UniformSplit
-
- setLastRow(byte[]) -
Method in class org.apache.hadoop.hbase.util.RegionSplitter.UniformSplit
-
- setLastShippedAge(long) -
Method in class org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationGlobalSourceSource
-
- setLastShippedAge(long) -
Method in interface org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSource
-
- setLastShippedAge(long) -
Method in class org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSourceImpl
-
- setLatestVersionOnly(boolean) -
Method in class org.apache.hadoop.hbase.filter.SingleColumnValueFilter
- Set whether only the latest version of the column value should be compared.
- setLatestVersionOnly(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.SingleColumnValueFilter.Builder
optional bool latest_version_only = 6;
- setLeastSigBits(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.Builder
required uint64 least_sig_bits = 1;
- setLenAsVal(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.KeyOnlyFilter.Builder
required bool len_as_val = 1;
- setLength(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.EncryptionProtos.WrappedKey.Builder
required uint32 length = 2;
- setLength(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.CellBlockMeta.Builder
optional uint32 length = 1;
- setLength(int) -
Method in interface org.apache.hadoop.hbase.util.ByteRange
- Update the length of this range.
- setLength(int) -
Method in interface org.apache.hadoop.hbase.util.PositionedByteRange
-
- setLength(int) -
Method in class org.apache.hadoop.hbase.util.SimpleByteRange
-
- setLength(int) -
Method in class org.apache.hadoop.hbase.util.SimplePositionedByteRange
- Update the length of this range.
- setLimit(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.ColumnCountGetFilter.Builder
required int32 limit = 1;
- setLimit(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.ColumnPaginationFilter.Builder
required int32 limit = 1;
- setLimit(int) -
Method in class org.apache.hadoop.hbase.tmpl.master.AssignmentManagerStatusTmpl.ImplData
-
- setLimit(int) -
Method in class org.apache.hadoop.hbase.tmpl.master.AssignmentManagerStatusTmpl
-
- setLineNumber(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.StackTraceElementMessage.Builder
optional int32 line_number = 4;
- setListener(EventHandler.EventHandlerListener) -
Method in class org.apache.hadoop.hbase.executor.EventHandler
-
- setLiveNodes(List<StorageClusterStatusModel.Node>) -
Method in class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel
-
- setLiveNodes(int, StorageClusterStatusMessage.StorageClusterStatus.Node) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Builder
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
- setLiveNodes(int, StorageClusterStatusMessage.StorageClusterStatus.Node.Builder) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Builder
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
- setLiveServers(int, ClusterStatusProtos.LiveServerInfo) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder
repeated .LiveServerInfo live_servers = 2;
- setLiveServers(int, ClusterStatusProtos.LiveServerInfo.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder
repeated .LiveServerInfo live_servers = 2;
- setLoad(ClusterStatusProtos.ServerLoad) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest.Builder
optional .ServerLoad load = 2;
- setLoad(ClusterStatusProtos.ServerLoad.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest.Builder
optional .ServerLoad load = 2;
- setLoadColumnFamiliesOnDemand(boolean) -
Method in class org.apache.hadoop.hbase.client.Scan
- Set the value indicating whether loading CFs on demand should be allowed (cluster
default is false).
- setLoadColumnFamiliesOnDemand(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder
optional bool load_column_families_on_demand = 13;
- setLoaded(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse.Builder
required bool loaded = 1;
- setLoaded(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.SecureBulkLoadHFilesResponse.Builder
required bool loaded = 1;
- setLoadOnOpenDataOffset(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HFileProtos.FileTrailerProto.Builder
optional uint64 load_on_open_data_offset = 2;
- setLoadOnOpenOffset(long) -
Method in class org.apache.hadoop.hbase.io.hfile.FixedFileTrailer
-
- setLoadStats(ClientProtos.RegionLoadStats) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException.Builder
optional .RegionLoadStats loadStats = 5;
- setLoadStats(ClientProtos.RegionLoadStats.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException.Builder
optional .RegionLoadStats loadStats = 5;
- setLocation(HRegionLocation) -
Method in class org.apache.hadoop.hbase.client.RegionServerCallable
-
- setLocation(String) -
Method in class org.apache.hadoop.hbase.rest.model.TableRegionModel
-
- setLocation(String) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder
optional string location = 5;
- setLocationBytes(ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder
optional string location = 5;
- setLocations(Path, Path...) -
Method in class org.apache.hadoop.hbase.io.FileLink
- NOTE: This method must be used only in the constructor!
It creates a List with the specified locations for the link.
- setLocations(int, String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit.Builder
repeated string locations = 2;
- setLockOwner(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock.Builder
required string lock_owner = 1;
- setLockOwner(HBaseProtos.ServerName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.Builder
optional .ServerName lock_owner = 2;
- setLockOwner(HBaseProtos.ServerName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.Builder
optional .ServerName lock_owner = 2;
- setLockOwnerBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationLock.Builder
required string lock_owner = 1;
- setLogPosition(String, String, long) -
Method in interface org.apache.hadoop.hbase.replication.ReplicationQueues
- Set the current position for a specific HLog in a given queue.
- setLogPosition(String, String, long) -
Method in class org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl
-
- setLogRecoveryMode() -
Method in class org.apache.hadoop.hbase.master.MasterFileSystem
- The function is used in SSH to set the recovery mode based on configuration after all outstanding
log split tasks are drained.
- setLogSequenceNumber(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder
required uint64 log_sequence_number = 3;
- setLongMsg(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg.Builder
required int64 long_msg = 1;
- setMajor(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest.Builder
optional bool major = 2;
- setMajorRange(byte[], byte[]) -
Method in class org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeCompactionRequest
- Sets compaction "major range".
- setMapEntries(int, HBaseProtos.NameStringPair) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.Builder
repeated .NameStringPair map_entries = 1;
- setMapEntries(int, HBaseProtos.NameStringPair.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.Builder
repeated .NameStringPair map_entries = 1;
- setMapEntry(int, HBaseProtos.BytesBytesPair) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HFileProtos.FileInfoProto.Builder
repeated .BytesBytesPair map_entry = 1;
- setMapEntry(int, HBaseProtos.BytesBytesPair.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HFileProtos.FileInfoProto.Builder
repeated .BytesBytesPair map_entry = 1;
- setMapperClass(Job, Class<? extends Mapper<ImmutableBytesWritable, Result, K2, V2>>) -
Static method in class org.apache.hadoop.hbase.mapreduce.MultithreadedTableMapper
- Set the application's mapper class.
- setMaster(HBaseProtos.ServerName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder
optional .ServerName master = 7;
- setMaster(HBaseProtos.ServerName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder
optional .ServerName master = 7;
- setMaster(HBaseProtos.ServerName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.Builder
required .ServerName master = 1;
- setMaster(HBaseProtos.ServerName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.Builder
required .ServerName master = 1;
- setMaster(HMaster) -
Method in class org.apache.hadoop.hbase.tmpl.master.BackupMasterStatusTmpl.ImplData
-
- setMaster(HMaster) -
Method in class org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl.ImplData
-
- setMaster(HMaster) -
Method in class org.apache.hadoop.hbase.tmpl.master.RegionServerListTmpl.ImplData
-
- setMasterAddress(ZooKeeperWatcher, String, ServerName) -
Static method in class org.apache.hadoop.hbase.zookeeper.MasterAddressTracker
- Set master address into the
master znode or into the backup
subdirectory of backup masters; switch off the passed in znode
path.
- setMasterCoprocessors(int, HBaseProtos.Coprocessor) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder
repeated .Coprocessor master_coprocessors = 6;
- setMasterCoprocessors(int, HBaseProtos.Coprocessor.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder
repeated .Coprocessor master_coprocessors = 6;
- setMasterServices(MasterServices) -
Method in class org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer
-
- setMasterServices(MasterServices) -
Method in class org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer
-
- setMasterServices(MasterServices) -
Method in interface org.apache.hadoop.hbase.master.LoadBalancer
- Set the master service.
- setMaxAttempts(int) -
Method in class org.apache.hadoop.hbase.util.RetryCounter.RetryConfig
-
- setMaxChunkSize(int) -
Method in class org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexWriter
-
- setMaxColumn(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.ColumnRangeFilter.Builder
optional bytes max_column = 3;
- setMaxColumnInclusive(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.ColumnRangeFilter.Builder
optional bool max_column_inclusive = 4;
- setMaxFileSize(long) -
Method in class org.apache.hadoop.hbase.client.UnmodifyableHTableDescriptor
-
- setMaxFileSize(long) -
Method in class org.apache.hadoop.hbase.HTableDescriptor
- Sets the maximum size up to which a region can grow, after which a region
split is triggered.
- setMaxHeapMB(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder
optional uint32 max_heap_MB = 4;
- setMaxHeapSizeMB(int) -
Method in class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel.Node
-
- setMaxHeapSizeMB(int) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder
optional int32 maxHeapSizeMB = 5;
- setMaxMemstoreTS(long) -
Method in class org.apache.hadoop.hbase.regionserver.StoreFile
-
- setMaxMerge(int) -
Method in class org.apache.hadoop.hbase.util.HBaseFsck
-
- setMaxOverlapsToSideline(int) -
Method in class org.apache.hadoop.hbase.util.HBaseFsck
-
- setMaxPoolSize(int) -
Method in class org.apache.hadoop.hbase.thrift.IncrementCoalescer
-
- setMaxPoolSize(int) -
Method in interface org.apache.hadoop.hbase.thrift.IncrementCoalescerMBean
-
- setMaxQualifierLength(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setMaxQueueSize(int) -
Method in class org.apache.hadoop.hbase.thrift.IncrementCoalescer
-
- setMaxQueueSize(int) -
Method in interface org.apache.hadoop.hbase.thrift.IncrementCoalescerMBean
-
- setMaxResultSize(long) -
Method in class org.apache.hadoop.hbase.client.Scan
- Set the maximum result size.
- setMaxResultSize(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder
optional uint64 max_result_size = 10;
- setMaxResultsPerColumnFamily(int) -
Method in class org.apache.hadoop.hbase.client.Get
- Set the maximum number of values to return per row per Column Family
- setMaxResultsPerColumnFamily(int) -
Method in class org.apache.hadoop.hbase.client.Scan
- Set the maximum number of values to return per row per Column Family
- setMaxRowLength(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setMaxSize(long) -
Method in class org.apache.hadoop.hbase.io.hfile.LruBlockCache
-
- setMaxSleepTime(long) -
Method in class org.apache.hadoop.hbase.util.RetryCounter.RetryConfig
-
- setMaxStamp(long) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TTimeRange
-
- setMaxStampIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TTimeRange
-
- setMaxTagsLength(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setMaxValues(int) -
Method in class org.apache.hadoop.hbase.rest.RowSpec
-
- setMaxVersions() -
Method in class org.apache.hadoop.hbase.client.Get
- Get all available versions.
- setMaxVersions(int) -
Method in class org.apache.hadoop.hbase.client.Get
- Get up to the specified number of versions of each column.
- setMaxVersions() -
Method in class org.apache.hadoop.hbase.client.Scan
- Get all available versions.
- setMaxVersions(int) -
Method in class org.apache.hadoop.hbase.client.Scan
- Get up to the specified number of versions of each column.
- setMaxVersions(int) -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
-
- setMaxVersions(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.Builder
optional uint32 max_versions = 6 [default = 1];
- setMaxVersions(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder
optional uint32 max_versions = 7 [default = 1];
- setMaxVersions(int) -
Method in class org.apache.hadoop.hbase.rest.model.ScannerModel
-
- setMaxVersions(int) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder
optional int32 maxVersions = 4;
- setMaxVersions(int) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner.Builder
optional int32 maxVersions = 7;
- setMaxVersions(int) -
Method in class org.apache.hadoop.hbase.rest.RowSpec
-
- setMaxVersions(int) -
Method in class org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor
-
- setMaxVersions(int) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TGet
-
- setMaxVersions(int) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- setMaxVersionsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor
-
- setMaxVersionsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TGet
-
- setMaxVersionsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- setMemStoreFlushSize(long) -
Method in class org.apache.hadoop.hbase.client.UnmodifyableHTableDescriptor
-
- setMemStoreFlushSize(long) -
Method in class org.apache.hadoop.hbase.HTableDescriptor
- Represents the maximum size of the memstore after which the contents of the
memstore are flushed to the filesystem.
- setMemstoreLoad(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.Builder
optional int32 memstoreLoad = 1 [default = 0];
- setMemstoreSizeMB(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder
optional uint32 memstore_size_MB = 6;
- setMemstoreSizeMB(int) -
Method in class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel.Node.Region
-
- setMemstoreSizeMB(int) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder
optional int32 memstoreSizeMB = 5;
- setMerged(boolean) -
Method in class org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo
-
- setMessage(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.GenericExceptionMessage.Builder
optional string message = 2;
- setMessage(String) -
Method in exception org.apache.hadoop.hbase.thrift.generated.AlreadyExists
-
- setMessage(String) -
Method in exception org.apache.hadoop.hbase.thrift.generated.IllegalArgument
-
- setMessage(String) -
Method in exception org.apache.hadoop.hbase.thrift.generated.IOError
-
- setMessage(String) -
Method in exception org.apache.hadoop.hbase.thrift2.generated.TIllegalArgument
-
- setMessage(String) -
Method in exception org.apache.hadoop.hbase.thrift2.generated.TIOError
-
- setMessageBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.GenericExceptionMessage.Builder
optional string message = 2;
- setMessageIsSet(boolean) -
Method in exception org.apache.hadoop.hbase.thrift.generated.AlreadyExists
-
- setMessageIsSet(boolean) -
Method in exception org.apache.hadoop.hbase.thrift.generated.IllegalArgument
-
- setMessageIsSet(boolean) -
Method in exception org.apache.hadoop.hbase.thrift.generated.IOError
-
- setMessageIsSet(boolean) -
Method in exception org.apache.hadoop.hbase.thrift2.generated.TIllegalArgument
-
- setMessageIsSet(boolean) -
Method in exception org.apache.hadoop.hbase.thrift2.generated.TIOError
-
- setMetaIndexCount(int) -
Method in class org.apache.hadoop.hbase.io.hfile.FixedFileTrailer
-
- setMetaIndexCount(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HFileProtos.FileTrailerProto.Builder
optional uint32 meta_index_count = 6;
- setMetaLocation(ServerName) -
Method in class org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl.ImplData
-
- setMetaLocation(ServerName) -
Method in class org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl
-
- setMetaLocation(ZooKeeperWatcher, ServerName, RegionState.State) -
Static method in class org.apache.hadoop.hbase.zookeeper.MetaRegionTracker
- Sets the location of
hbase:meta in ZooKeeper to the
specified server address.
- setMetaRegion(boolean) -
Method in class org.apache.hadoop.hbase.HTableDescriptor
- INTERNAL Used to denote if the current table represents
-ROOT- or hbase:meta region.
- setMethodName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall.Builder
required string method_name = 3;
- setMethodName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.StackTraceElementMessage.Builder
optional string method_name = 2;
- setMethodName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader.Builder
optional string method_name = 3;
- setMethodNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall.Builder
required string method_name = 3;
- setMethodNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.StackTraceElementMessage.Builder
optional string method_name = 2;
- setMethodNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader.Builder
optional string method_name = 3;
- setMetrics(int, HBaseProtos.NameInt64Pair) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.ScanMetrics.Builder
repeated .NameInt64Pair metrics = 1;
- setMetrics(int, HBaseProtos.NameInt64Pair.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.ScanMetrics.Builder
repeated .NameInt64Pair metrics = 1;
- setMinColumn(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.ColumnRangeFilter.Builder
optional bytes min_column = 1;
- setMinColumnInclusive(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.ColumnRangeFilter.Builder
optional bool min_column_inclusive = 2;
- setMinMvccVersion(long) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setMinStamp(long) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TTimeRange
-
- setMinStampIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TTimeRange
-
- setMinTimestamp(long) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setMinVersions(int) -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
-
- setMode(ZooKeeperProtos.SplitLogTask.RecoveryMode) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.Builder
optional .SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN];
- setMoreResults(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse.Builder
optional bool more_results = 3;
- setMoreResultsInRegion(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse.Builder
optional bool more_results_in_region = 8;
- setMostSigBits(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.Builder
required uint64 most_sig_bits = 2;
- setMutateType(ClientProtos.MutationProto.MutationType) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Builder
optional .MutationProto.MutationType mutate_type = 2;
- setMutation(ClientProtos.MutationProto) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Action.Builder
optional .MutationProto mutation = 2;
- setMutation(ClientProtos.MutationProto.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Action.Builder
optional .MutationProto mutation = 2;
- setMutation(ClientProtos.MutationProto) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest.Builder
required .MutationProto mutation = 2;
- setMutation(ClientProtos.MutationProto.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest.Builder
required .MutationProto mutation = 2;
- setMutationRequest(int, ClientProtos.MutationProto) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest.Builder
repeated .MutationProto mutation_request = 1;
- setMutationRequest(int, ClientProtos.MutationProto.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest.Builder
repeated .MutationProto mutation_request = 1;
- setMutations(List<Mutation>) -
Method in class org.apache.hadoop.hbase.thrift.generated.BatchMutation
-
- setMutations(List<Mutation>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRow_args
- list of mutation commands
- setMutations(List<Mutation>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowTs_args
- list of mutation commands
- setMutations(List<TMutation>) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TRowMutations
-
- setMutationsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.BatchMutation
-
- setMutationsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRow_args
-
- setMutationsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowTs_args
-
- setMutationsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TRowMutations
-
- setMvccVersion(long) -
Method in class org.apache.hadoop.hbase.KeyValue
-
- setMvccVersionDeltaWidth(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setMvccVersionFields(LongEncoder) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setMvccVersionIndexWidth(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setMWrap(MetricsRegionServerWrapper) -
Method in class org.apache.hadoop.hbase.tmpl.regionserver.ServerMetricsTmpl.ImplData
-
- setName(byte[]) -
Method in class org.apache.hadoop.hbase.HTableDescriptor
- Deprecated.
- setName(TableName) -
Method in class org.apache.hadoop.hbase.HTableDescriptor
- Deprecated.
- setName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.Comparator.Builder
required string name = 1;
- setName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder
required string name = 1;
- setName(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder
required bytes name = 1;
- setName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder
required string name = 1;
- setName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.Builder
required string name = 1;
- setName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.Builder
optional string name = 1;
- setName(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder
required bytes name = 1;
- setName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder
required string name = 1;
- setName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder
required string name = 1;
- setName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.Builder
required string name = 1;
- setName(String) -
Method in class org.apache.hadoop.hbase.rest.model.ColumnSchemaModel
-
- setName(byte[]) -
Method in class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel.Node.Region
-
- setName(String) -
Method in class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel.Node
-
- setName(String) -
Method in class org.apache.hadoop.hbase.rest.model.TableInfoModel
-
- setName(String) -
Method in class org.apache.hadoop.hbase.rest.model.TableModel
-
- setName(String) -
Method in class org.apache.hadoop.hbase.rest.model.TableRegionModel
-
- setName(String) -
Method in class org.apache.hadoop.hbase.rest.model.TableSchemaModel
-
- setName(String) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder
required string name = 1;
- setName(String) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder
optional string name = 1;
- setName(String) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder
required string name = 1;
- setName(ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder
required bytes name = 1;
- setName(String) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Builder
required string name = 1;
- setName(String) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder
required string name = 1;
- setName(int, String) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList.Builder
repeated string name = 1;
- setName(String) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder
required string name = 1;
- setName(String) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Builder
optional string name = 1;
- setName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor
-
- setName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor
-
- setName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRegionInfo
-
- setName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRegionInfo
-
- setName(String) -
Method in class org.apache.hadoop.hbase.util.HasThread
-
- setNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.Comparator.Builder
required string name = 1;
- setNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder
required string name = 1;
- setNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder
required string name = 1;
- setNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.Builder
required string name = 1;
- setNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.Builder
optional string name = 1;
- setNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder
required string name = 1;
- setNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder
required string name = 1;
- setNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.Builder
required string name = 1;
- setNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder
required string name = 1;
- setNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder
optional string name = 1;
- setNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder
required string name = 1;
- setNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Builder
required string name = 1;
- setNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder
required string name = 1;
- setNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder
required string name = 1;
- setNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Builder
optional string name = 1;
- setNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor
-
- setNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRegionInfo
-
- setNamespace(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder
required bytes namespace = 1;
- setNamespaceDescriptor(HBaseProtos.NamespaceDescriptor) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.Builder
required .NamespaceDescriptor namespaceDescriptor = 1;
- setNamespaceDescriptor(HBaseProtos.NamespaceDescriptor.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.Builder
required .NamespaceDescriptor namespaceDescriptor = 1;
- setNamespaceDescriptor(HBaseProtos.NamespaceDescriptor) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.Builder
required .NamespaceDescriptor namespaceDescriptor = 1;
- setNamespaceDescriptor(HBaseProtos.NamespaceDescriptor.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.Builder
required .NamespaceDescriptor namespaceDescriptor = 1;
- setNamespaceDescriptor(int, HBaseProtos.NamespaceDescriptor) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.Builder
repeated .NamespaceDescriptor namespaceDescriptor = 1;
- setNamespaceDescriptor(int, HBaseProtos.NamespaceDescriptor.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.Builder
repeated .NamespaceDescriptor namespaceDescriptor = 1;
- setNamespaceDescriptor(HBaseProtos.NamespaceDescriptor) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.Builder
required .NamespaceDescriptor namespaceDescriptor = 1;
- setNamespaceDescriptor(HBaseProtos.NamespaceDescriptor.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.Builder
required .NamespaceDescriptor namespaceDescriptor = 1;
- setNamespaceGroupPermissions(String, String, List<TablePermission>) -
Method in class org.apache.hadoop.hbase.security.access.TableAuthManager
- Overwrites the existing permission set for a group and triggers an update
for zookeeper synchronization.
- setNamespaceName(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.GetUserPermissionsRequest.Builder
optional bytes namespace_name = 3;
- setNamespaceName(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.NamespacePermission.Builder
optional bytes namespace_name = 1;
- setNamespaceName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.Builder
required string namespaceName = 1;
- setNamespaceName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.Builder
required string namespaceName = 1;
- setNamespaceName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.Builder
required string namespaceName = 1;
- setNamespaceName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.Builder
required string namespaceName = 1;
- setNamespaceNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.Builder
required string namespaceName = 1;
- setNamespaceNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.Builder
required string namespaceName = 1;
- setNamespaceNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.Builder
required string namespaceName = 1;
- setNamespaceNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.Builder
required string namespaceName = 1;
- setNamespacePermission(AccessControlProtos.NamespacePermission) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.Permission.Builder
optional .NamespacePermission namespace_permission = 3;
- setNamespacePermission(AccessControlProtos.NamespacePermission.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.Permission.Builder
optional .NamespacePermission namespace_permission = 3;
- setNamespaceUserPermissions(String, String, List<TablePermission>) -
Method in class org.apache.hadoop.hbase.security.access.TableAuthManager
- Overwrites the existing permission set for a given user for a namespace, and
triggers an update for zookeeper synchronization.
- setNbRows(int) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerGetList_args
- number of results to return
- setNbRowsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerGetList_args
-
- setNegativeIndex(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenizerNode
-
- setNextCallSeq(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest.Builder
optional uint64 next_call_seq = 6;
- setNextNodeOffsetWidth(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setNextPtr(LruHashMap.Entry<K, V>) -
Method in class org.apache.hadoop.hbase.regionserver.LruHashMap.Entry
- Sets the next pointer for the entry in the LRU.
- setNodeFirstInsertionIndexes() -
Method in class org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.Tokenizer
- write
- setNonce(long) -
Method in class org.apache.hadoop.hbase.client.Action
-
- setNonce(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Builder
optional uint64 nonce = 9;
- setNonce(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest.Builder
optional uint64 nonce = 3;
- setNonce(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.ProcessRequest.Builder
optional uint64 nonce = 5;
- setNonce(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder
optional uint64 nonce = 10;
- setNonceGroup(long) -
Method in class org.apache.hadoop.hbase.client.MultiAction
-
- setNonceGroup(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest.Builder
optional uint64 nonceGroup = 2;
- setNonceGroup(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest.Builder
optional uint64 nonce_group = 4;
- setNonceGroup(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest.Builder
optional uint64 nonce_group = 2;
- setNonceGroup(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.ProcessRequest.Builder
optional uint64 nonce_group = 4;
- setNonceGroup(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder
optional uint64 nonceGroup = 9;
- setNonSeekedState() -
Method in class org.apache.hadoop.hbase.io.hfile.HFileReaderV2.ScannerV2
-
- setNonSeekedState() -
Method in class org.apache.hadoop.hbase.io.hfile.HFileReaderV3.ScannerV3
-
- setNoStripeMetadata() -
Method in class org.apache.hadoop.hbase.regionserver.StripeMultiFileWriter
-
- setNumberOfRequests(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder
optional uint32 number_of_requests = 1;
- setNumberOfRowPrefixes(int) -
Method in class org.apache.hadoop.hbase.util.test.RedundantKVGenerator
-
- setNumberOfRows(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest.Builder
optional uint32 number_of_rows = 4;
- setNumberOfRows(int) -
Method in class org.apache.hadoop.hbase.util.test.RedundantKVGenerator
-
- setNumberOfThreads(Job, int) -
Static method in class org.apache.hadoop.hbase.mapreduce.MultithreadedTableMapper
- Set the number of threads in the pool for running maps.
- setNumDataIndexLevels(int) -
Method in class org.apache.hadoop.hbase.io.hfile.FixedFileTrailer
-
- setNumDataIndexLevels(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HFileProtos.FileTrailerProto.Builder
optional uint32 num_data_index_levels = 8;
- setNumFamilyBytes(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setNumKeyValueBytes(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setNumMapTasks(String, JobConf) -
Static method in class org.apache.hadoop.hbase.mapred.TableMapReduceUtil
- Deprecated. Sets the number of map tasks for the given job configuration to the
number of regions the given table has.
- setNumMetaBytes(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setNumMvccVersionBytes(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setNumOccurrences(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenizerNode
-
- setNumQualifierBytes(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setNumReduceTasks(String, JobConf) -
Static method in class org.apache.hadoop.hbase.mapred.TableMapReduceUtil
- Deprecated. Sets the number of reduce tasks for the given job configuration to the
number of regions the given table has.
- setNumReduceTasks(String, Job) -
Static method in class org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil
- Sets the number of reduce tasks for the given job configuration to the
number of regions the given table has.
- setNumRowBytes(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setNumRows(int) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerResults_args
- number of rows to return
- setNumRows(int) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerRows_args
- number of rows to return
- setNumRowsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerResults_args
-
- setNumRowsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerRows_args
-
- setNumTagsBytes(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setNumTimestampBytes(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setNumUniqueFamilies(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setNumUniqueQualifiers(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setNumUniqueRows(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setNumUniqueTags(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setNumValueBytes(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setNumVersions(int) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVer_args
- number of versions to retrieve
- setNumVersions(int) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVerTs_args
- number of versions to retrieve
- setNumVersionsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVer_args
-
- setNumVersionsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVerTs_args
-
- setOffline(boolean) -
Method in class org.apache.hadoop.hbase.HRegionInfo
- The parent of a region split is offline while split daughters hold
references to the parent.
- setOffline(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder
optional bool offline = 5;
- setOffPeak(boolean) -
Method in class org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest
-
- setOffset(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.ColumnPaginationFilter.Builder
optional int32 offset = 2;
- setOffset(int) -
Method in interface org.apache.hadoop.hbase.util.ByteRange
- Update the beginning of this range.
- setOffset(int) -
Method in interface org.apache.hadoop.hbase.util.PositionedByteRange
-
- setOffset(int) -
Method in class org.apache.hadoop.hbase.util.SimpleByteRange
-
- setOffset(int) -
Method in class org.apache.hadoop.hbase.util.SimplePositionedByteRange
- Update the beginning of this range.
- setOn(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest.Builder
required bool on = 1;
- setOnlineRegions(List<HRegionInfo>) -
Method in class org.apache.hadoop.hbase.tmpl.regionserver.RegionListTmpl.ImplData
-
- setOpenForDistributedLogReplay(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder
optional bool openForDistributedLogReplay = 4;
- setOpenInfo(int, AdminProtos.OpenRegionRequest.RegionOpenInfo) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.Builder
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
- setOpenInfo(int, AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.Builder
repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
- setOpeningState(int, AdminProtos.OpenRegionResponse.RegionOpeningState) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse.Builder
repeated .OpenRegionResponse.RegionOpeningState opening_state = 1;
- setOpenSeqNum(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.Builder
optional uint64 open_seq_num = 3;
- setOperationStatus(int, OperationStatus) -
Method in class org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress
- Sets the status code for the operation(Mutation) at the specified position.
- setOperationTimeout(int) -
Method in class org.apache.hadoop.hbase.client.HTable
-
- setOperator(FilterProtos.FilterList.Operator) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterList.Builder
required .FilterList.Operator operator = 1;
- setOrdinal(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabel.Builder
optional uint32 ordinal = 2;
- setOSVersion(String) -
Method in class org.apache.hadoop.hbase.rest.model.VersionModel
-
- setOsVersion(String) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version.Builder
optional string osVersion = 3;
- setOsVersionBytes(ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version.Builder
optional string osVersion = 3;
- setOutputArrayOffset(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenizerNode
-
- setOwner(User) -
Method in class org.apache.hadoop.hbase.HTableDescriptor
- Deprecated.
- setOwnerString(String) -
Method in class org.apache.hadoop.hbase.HTableDescriptor
- Deprecated.
- setPageSize(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.PageFilter.Builder
required int64 page_size = 1;
- setParent(TokenizerNode) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenizerNode
-
- setParentId(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.TracingProtos.RPCTInfo.Builder
optional int64 parent_id = 2;
- setParentStartPosition(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.encode.column.ColumnNodeWriter
- get/set
- setPassword(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.Token.Builder
optional bytes password = 2;
- setPassword(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.DelegationToken.Builder
optional bytes password = 2;
- setPath(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath.Builder
required string path = 2;
- setPathBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath.Builder
required string path = 2;
- setPattern(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.RegexStringComparator.Builder
required string pattern = 1;
- setPatternBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.RegexStringComparator.Builder
required string pattern = 1;
- setPatternFlags(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.RegexStringComparator.Builder
required int32 pattern_flags = 2;
- setPayload(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.Builder
optional bytes payload = 5;
- setPeerID(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder
required string peerID = 1;
- setPeerIDBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder
required string peerID = 1;
- setPeerTableCFs(String, String) -
Method in class org.apache.hadoop.hbase.client.replication.ReplicationAdmin
- Deprecated. use
ReplicationAdmin.setPeerTableCFs(String, Map)
- setPeerTableCFs(String, Map<TableName, ? extends Collection<String>>) -
Method in class org.apache.hadoop.hbase.client.replication.ReplicationAdmin
- Set the replicable table-cf config of the specified peer
- setPeerTableCFsConfig(String, String) -
Method in interface org.apache.hadoop.hbase.replication.ReplicationPeers
- Set the table and column-family list string of the peer to ZK.
- setPeerTableCFsConfig(String, String) -
Method in class org.apache.hadoop.hbase.replication.ReplicationPeersZKImpl
-
- setPermission(int, AccessControlProtos.Permission) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.CheckPermissionsRequest.Builder
repeated .Permission permission = 1;
- setPermission(int, AccessControlProtos.Permission.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.CheckPermissionsRequest.Builder
repeated .Permission permission = 1;
- setPermission(AccessControlProtos.Permission) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UserPermission.Builder
required .Permission permission = 3;
- setPermission(AccessControlProtos.Permission.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UserPermission.Builder
required .Permission permission = 3;
- setPermissions(int, AccessControlProtos.Permission) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.UserPermissions.Builder
repeated .Permission permissions = 2;
- setPermissions(int, AccessControlProtos.Permission.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.UserPermissions.Builder
repeated .Permission permissions = 2;
- setPingInterval(Configuration, int) -
Static method in class org.apache.hadoop.hbase.ipc.RpcClient
- set the ping interval value in configuration
- setPort(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder
optional uint32 port = 2;
- setPort(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest.Builder
required uint32 port = 1;
- setPort(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ExceptionResponse.Builder
optional int32 port = 4;
- setPort(int) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRegionInfo
-
- setPortIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRegionInfo
-
- setPosition(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.Builder
required int64 position = 1;
- setPosition(long) -
Method in class org.apache.hadoop.hbase.replication.regionserver.ReplicationHLogReaderManager
-
- setPosition(int) -
Method in interface org.apache.hadoop.hbase.util.PositionedByteRange
- Update the
position index.
- setPosition(int) -
Method in class org.apache.hadoop.hbase.util.SimplePositionedByteRange
-
- setPrefetchBlocksOnOpen(boolean) -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
-
- setPrefix(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.ColumnPrefixFilter.Builder
required bytes prefix = 1;
- setPrefix(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.PrefixFilter.Builder
optional bytes prefix = 1;
- setPrefixLengthVariance(int) -
Method in class org.apache.hadoop.hbase.util.test.RedundantKVGenerator
-
- setPreserveSplits(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest.Builder
optional bool preserveSplits = 2 [default = false];
- setPrevBalanceValue(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.Builder
optional bool prev_balance_value = 1;
- setPrevPtr(LruHashMap.Entry<K, V>) -
Method in class org.apache.hadoop.hbase.regionserver.LruHashMap.Entry
- Sets the previous pointer for the entry in the LRU.
- setPrevValue(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.Builder
optional bool prev_value = 1;
- setPriority(int) -
Method in class org.apache.hadoop.hbase.ipc.DelegatingPayloadCarryingRpcController
-
- setPriority(TableName) -
Method in class org.apache.hadoop.hbase.ipc.DelegatingPayloadCarryingRpcController
-
- setPriority(int) -
Method in class org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController
-
- setPriority(TableName) -
Method in class org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController
-
- setPriority(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader.Builder
optional uint32 priority = 6;
- setPriority(int) -
Method in class org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest
- Sets the priority for the request
- setPriority(int) -
Method in class org.apache.hadoop.hbase.util.HasThread
-
- setProcedure(HBaseProtos.ProcedureDescription) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.Builder
required .ProcedureDescription procedure = 1;
- setProcedure(HBaseProtos.ProcedureDescription.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.Builder
required .ProcedureDescription procedure = 1;
- setProcedure(HBaseProtos.ProcedureDescription) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.Builder
optional .ProcedureDescription procedure = 1;
- setProcedure(HBaseProtos.ProcedureDescription.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.Builder
optional .ProcedureDescription procedure = 1;
- setProcessed(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse.Builder
optional bool processed = 2;
- setProcessed(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse.Builder
optional bool processed = 2;
- setPurpose(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.Builder
optional string purpose = 5;
- setPurposeBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.Builder
optional string purpose = 5;
- setPut(TPut) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndPut_args
- the TPut to put if the check succeeds
- setPut(TPut) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.put_args
- the TPut to put
- setPut(TPut) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TMutation
-
- setPutIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndPut_args
-
- setPutIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.put_args
-
- setPuts(List<TPut>) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.putMultiple_args
- a list of TPuts to commit
- setPutsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.putMultiple_args
-
- setQualifier(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.TablePermission.Builder
optional bytes qualifier = 3;
- setQualifier(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder
optional bytes qualifier = 3;
- setQualifier(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.CellProtos.KeyValue.Builder
required bytes qualifier = 3;
- setQualifier(int, ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column.Builder
repeated bytes qualifier = 2;
- setQualifier(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition.Builder
required bytes qualifier = 3;
- setQualifier(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue.Builder
optional bytes qualifier = 1;
- setQualifier(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder
required bytes qualifier = 2;
- setQualifier(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumn
-
- setQualifier(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumn
-
- setQualifier(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumnIncrement
-
- setQualifier(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumnIncrement
-
- setQualifier(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumnValue
-
- setQualifier(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumnValue
-
- setQualifier(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndDelete_args
- column qualifier to check
- setQualifier(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndDelete_args
-
- setQualifier(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndPut_args
- column qualifier to check
- setQualifier(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndPut_args
-
- setQualifierIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumn
-
- setQualifierIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumnIncrement
-
- setQualifierIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumnValue
-
- setQualifierIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndDelete_args
-
- setQualifierIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndPut_args
-
- setQualifierLengthVariance(int) -
Method in class org.apache.hadoop.hbase.util.test.RedundantKVGenerator
-
- setQualifierOffsetWidth(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setQualifiers(int, ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FirstKeyValueMatchingQualifiersFilter.Builder
repeated bytes qualifiers = 1;
- setQualifierValue(int, ClientProtos.MutationProto.ColumnValue.QualifierValue) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.Builder
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
- setQualifierValue(int, ClientProtos.MutationProto.ColumnValue.QualifierValue.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.Builder
repeated .MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
- setRandomizer(Random) -
Method in class org.apache.hadoop.hbase.util.test.RedundantKVGenerator
-
- setRange(FSProtos.Reference.Range) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Builder
required .Reference.Range range = 2;
- setRaw(boolean) -
Method in class org.apache.hadoop.hbase.client.Scan
- Enable/disable "raw" mode for this scan.
- setReadOnly(boolean) -
Method in class org.apache.hadoop.hbase.client.UnmodifyableHTableDescriptor
-
- setReadOnly(boolean) -
Method in class org.apache.hadoop.hbase.HTableDescriptor
- Setting the table as read only sets all the columns in the table as read
only.
- setReadOnly(boolean) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Builder
optional bool readOnly = 5;
- setReadRequestsCount(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder
optional uint64 read_requests_count = 8;
- setReadRequestsCount(long) -
Method in class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel.Node.Region
-
- setReadRequestsCount(long) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder
optional int64 readRequestsCount = 7;
- setRealUser(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder
optional string real_user = 2;
- setRealUserBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder
optional string real_user = 2;
- setReason(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest.Builder
required string reason = 1;
- setReason(String) -
Method in class org.apache.hadoop.hbase.security.access.AuthResult
-
- setReasonBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest.Builder
required string reason = 1;
- setRecovering(boolean) -
Method in class org.apache.hadoop.hbase.regionserver.HRegion
- Reset recovering state of current region
- setRecoveryMode(boolean) -
Method in class org.apache.hadoop.hbase.master.SplitLogManager
- This function is to set recovery mode from outstanding split log tasks from before or
current configuration setting
- setReference(FSProtos.Reference) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.Builder
optional .Reference reference = 2;
- setReference(FSProtos.Reference.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.Builder
optional .Reference reference = 2;
- setRegex(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.ListLabelsRequest.Builder
optional string regex = 1;
- setRegexBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.ListLabelsRequest.Builder
optional string regex = 1;
- setRegion(HBaseProtos.RegionSpecifier) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionInfo) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder
required .RegionInfo region = 1;
- setRegion(HBaseProtos.RegionInfo.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder
required .RegionInfo region = 1;
- setRegion(HBaseProtos.RegionSpecifier) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionInfo) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo.Builder
required .RegionInfo region = 1;
- setRegion(HBaseProtos.RegionInfo.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo.Builder
required .RegionInfo region = 1;
- setRegion(HBaseProtos.RegionSpecifier) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionAction.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionAction.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest.Builder
optional .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest.Builder
optional .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionInfo) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit.Builder
optional .RegionInfo region = 4;
- setRegion(HBaseProtos.RegionInfo.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit.Builder
optional .RegionInfo region = 4;
- setRegion(HBaseProtos.RegionSpecifier) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest.Builder
required .RegionSpecifier region = 1;
- setRegion(HBaseProtos.RegionSpecifier.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest.Builder
required .RegionSpecifier region = 1;
- setRegionA(HBaseProtos.RegionSpecifier) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest.Builder
required .RegionSpecifier region_a = 1;
- setRegionA(HBaseProtos.RegionSpecifier.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest.Builder
required .RegionSpecifier region_a = 1;
- setRegionA(HBaseProtos.RegionSpecifier) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest.Builder
required .RegionSpecifier region_a = 1;
- setRegionA(HBaseProtos.RegionSpecifier.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest.Builder
required .RegionSpecifier region_a = 1;
- setRegionAction(int, ClientProtos.RegionAction) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest.Builder
repeated .RegionAction regionAction = 1;
- setRegionAction(int, ClientProtos.RegionAction.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest.Builder
repeated .RegionAction regionAction = 1;
- setRegionActionResult(int, ClientProtos.RegionActionResult) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse.Builder
repeated .RegionActionResult regionActionResult = 1;
- setRegionActionResult(int, ClientProtos.RegionActionResult.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse.Builder
repeated .RegionActionResult regionActionResult = 1;
- setRegionB(HBaseProtos.RegionSpecifier) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest.Builder
required .RegionSpecifier region_b = 2;
- setRegionB(HBaseProtos.RegionSpecifier.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest.Builder
required .RegionSpecifier region_b = 2;
- setRegionB(HBaseProtos.RegionSpecifier) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest.Builder
required .RegionSpecifier region_b = 2;
- setRegionB(HBaseProtos.RegionSpecifier.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest.Builder
required .RegionSpecifier region_b = 2;
- setRegionCachePrefetch(TableName, boolean) -
Method in interface org.apache.hadoop.hbase.client.HConnection
- Enable or disable region cache prefetch for the table.
- setRegionCachePrefetch(byte[], boolean) -
Method in interface org.apache.hadoop.hbase.client.HConnection
-
- setRegionCachePrefetch(TableName, boolean) -
Method in class org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation
-
- setRegionCachePrefetch(byte[], boolean) -
Method in class org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation
-
- setRegionCachePrefetch(byte[], boolean) -
Static method in class org.apache.hadoop.hbase.client.HTable
- Enable or disable region cache prefetch for the table.
- setRegionCachePrefetch(TableName, boolean) -
Static method in class org.apache.hadoop.hbase.client.HTable
-
- setRegionCachePrefetch(Configuration, byte[], boolean) -
Static method in class org.apache.hadoop.hbase.client.HTable
- Enable or disable region cache prefetch for the table.
- setRegionCachePrefetch(Configuration, TableName, boolean) -
Static method in class org.apache.hadoop.hbase.client.HTable
-
- setRegionCoprocessorHost(RegionCoprocessorHost) -
Method in class org.apache.hadoop.hbase.regionserver.StoreFileInfo
- Sets the region coprocessor env.
- setRegionFilter(String) -
Method in class org.apache.hadoop.hbase.regionserver.wal.HLogPrettyPrinter
- sets the region by which output will be filtered
- setRegionId(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder
required uint64 region_id = 1;
- setRegionInfo(int, HBaseProtos.RegionInfo) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse.Builder
repeated .RegionInfo region_info = 1;
- setRegionInfo(int, HBaseProtos.RegionInfo.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse.Builder
repeated .RegionInfo region_info = 1;
- setRegionInfo(HBaseProtos.RegionInfo) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.Builder
required .RegionInfo region_info = 1;
- setRegionInfo(HBaseProtos.RegionInfo.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.Builder
required .RegionInfo region_info = 1;
- setRegionInfo(HBaseProtos.RegionInfo) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder
required .RegionInfo region_info = 1;
- setRegionInfo(HBaseProtos.RegionInfo.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder
required .RegionInfo region_info = 1;
- setRegionInfo(int, HBaseProtos.RegionInfo) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.Builder
repeated .RegionInfo region_info = 2;
- setRegionInfo(int, HBaseProtos.RegionInfo.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.Builder
repeated .RegionInfo region_info = 2;
- setRegionInfo(HBaseProtos.RegionInfo) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder
required .RegionInfo region_info = 2;
- setRegionInfo(HBaseProtos.RegionInfo.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder
required .RegionInfo region_info = 2;
- setRegionLoads(int, ClusterStatusProtos.RegionLoad) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder
repeated .RegionLoad region_loads = 5;
- setRegionLoads(int, ClusterStatusProtos.RegionLoad.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder
repeated .RegionLoad region_loads = 5;
- setRegionManifests(int, SnapshotProtos.SnapshotRegionManifest) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest.Builder
repeated .SnapshotRegionManifest region_manifests = 2;
- setRegionManifests(int, SnapshotProtos.SnapshotRegionManifest.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest.Builder
repeated .SnapshotRegionManifest region_manifests = 2;
- setRegionName(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.Builder
required bytes region_name = 1;
- setRegionName(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor.Builder
optional bytes region_name = 7;
- setRegionName(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.Builder
required bytes region_name = 2;
- setRegions(List<StorageClusterStatusModel.Node.Region>) -
Method in class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel.Node
-
- setRegions(int) -
Method in class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel
-
- setRegions(List<TableRegionModel>) -
Method in class org.apache.hadoop.hbase.rest.model.TableInfoModel
-
- setRegions(int) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Builder
optional int32 regions = 3;
- setRegions(int, StorageClusterStatusMessage.StorageClusterStatus.Region) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
- setRegions(int, StorageClusterStatusMessage.StorageClusterStatus.Region.Builder) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
- setRegions(int, TableInfoMessage.TableInfo.Region) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Builder
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
- setRegions(int, TableInfoMessage.TableInfo.Region.Builder) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Builder
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Region regions = 2;
- setRegionServer(HRegionServer) -
Method in class org.apache.hadoop.hbase.tmpl.regionserver.RegionListTmpl.ImplData
-
- setRegionServer(HRegionServer) -
Method in class org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl.ImplData
-
- setRegionServers(List<ServerName>) -
Method in class org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint
- Set the list of region servers for that peer
- setRegionsInTransition(int, ClusterStatusProtos.RegionInTransition) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder
repeated .RegionInTransition regions_in_transition = 4;
- setRegionsInTransition(int, ClusterStatusProtos.RegionInTransition.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder
repeated .RegionInTransition regions_in_transition = 4;
- setRegionSpecifier(HBaseProtos.RegionSpecifier) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder
required .RegionSpecifier region_specifier = 1;
- setRegionSpecifier(HBaseProtos.RegionSpecifier.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder
required .RegionSpecifier region_specifier = 1;
- setRegionSplitPolicyClassName(String) -
Method in class org.apache.hadoop.hbase.HTableDescriptor
- This sets the class associated with the region split policy which
determines when a region split should occur.
- setRegionState(ClusterStatusProtos.RegionState) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder
required .RegionState region_state = 2;
- setRegionState(ClusterStatusProtos.RegionState.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder
required .RegionState region_state = 2;
- setRegionsToReopen(List<HRegionInfo>) -
Method in class org.apache.hadoop.hbase.master.AssignmentManager
- Set the list of regions that will be reopened
because of an update in table schema
- setRegionToFlush(int, ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.Builder
repeated bytes region_to_flush = 1;
- setReplicationEndpointImpl(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder
optional string replicationEndpointImpl = 2;
- setReplicationEndpointImpl(String) -
Method in class org.apache.hadoop.hbase.replication.ReplicationPeerConfig
- Sets the ReplicationEndpoint plugin class for this peer.
- setReplicationEndpointImplBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder
optional string replicationEndpointImpl = 2;
- setReplicationLag(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder
required uint64 replicationLag = 5;
- setReplLoadSink(ClusterStatusProtos.ReplicationLoadSink) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder
optional .ReplicationLoadSink replLoadSink = 11;
- setReplLoadSink(ClusterStatusProtos.ReplicationLoadSink.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder
optional .ReplicationLoadSink replLoadSink = 11;
- setReplLoadSource(int, ClusterStatusProtos.ReplicationLoadSource) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder
repeated .ReplicationLoadSource replLoadSource = 10;
- setReplLoadSource(int, ClusterStatusProtos.ReplicationLoadSource.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder
repeated .ReplicationLoadSource replLoadSource = 10;
- setReportEndTime(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder
optional uint64 report_end_time = 8;
- setReportStartTime(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder
optional uint64 report_start_time = 7;
- setRequest(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall.Builder
required bytes request = 4;
- setRequest(CompactionRequest) -
Method in class org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeCompactionRequest
-
- setRequestParam(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader.Builder
optional bool request_param = 4;
- setRequests(int) -
Method in class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel.Node
-
- setRequests(int) -
Method in class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel
-
- setRequests(int) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Builder
optional int32 requests = 4;
- setRequests(int) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder
optional int32 requests = 3;
- setResponse(Message, CellScanner) -
Method in class org.apache.hadoop.hbase.ipc.RpcClient.Call
- Set the return value when there is no error.
- setResponse(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.Builder
optional uint32 response = 1;
- setRESTVersion(String) -
Method in class org.apache.hadoop.hbase.rest.model.VersionModel
-
- setRestVersion(String) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version.Builder
optional string restVersion = 1;
- setRestVersionBytes(ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version.Builder
optional string restVersion = 1;
- setResult(ClientProtos.Result) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse.Builder
optional .Result result = 1;
- setResult(ClientProtos.Result.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse.Builder
optional .Result result = 1;
- setResult(ClientProtos.Result) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse.Builder
optional .Result result = 1;
- setResult(ClientProtos.Result.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse.Builder
optional .Result result = 1;
- setResult(ClientProtos.Result) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException.Builder
optional .Result result = 2;
- setResult(ClientProtos.Result.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException.Builder
optional .Result result = 2;
- setResult(int, ClientProtos.RegionActionResult) -
Method in class org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse.Builder
repeated .RegionActionResult result = 1;
- setResult(int, ClientProtos.RegionActionResult.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse.Builder
repeated .RegionActionResult result = 1;
- setResultOrException(int, ClientProtos.ResultOrException) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult.Builder
repeated .ResultOrException resultOrException = 1;
- setResultOrException(int, ClientProtos.ResultOrException.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult.Builder
repeated .ResultOrException resultOrException = 1;
- setResults(int, ClientProtos.Result) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse.Builder
repeated .Result results = 5;
- setResults(int, ClientProtos.Result.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse.Builder
repeated .Result results = 5;
- setRetCode(int) -
Method in class org.apache.hadoop.hbase.util.HBaseFsck
-
- setReturnResults(boolean) -
Method in class org.apache.hadoop.hbase.client.Append
-
- setReversed(boolean) -
Method in class org.apache.hadoop.hbase.client.Scan
- Set whether this scan is a reversed one
- setReversed(boolean) -
Method in class org.apache.hadoop.hbase.filter.Filter
- alter the reversed scan flag
- setReversed(boolean) -
Method in class org.apache.hadoop.hbase.filter.FilterList
-
- setReversed(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder
optional bool reversed = 15 [default = false];
- setReversed(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TScan
-
- setReversed(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- setReversedIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TScan
-
- setReversedIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- setRevision(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder
required string revision = 3;
- setRevisionBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder
required string revision = 3;
- setRIT(int) -
Method in interface org.apache.hadoop.hbase.master.MetricsAssignmentManagerSource
- Set the number of regions in transition.
- setRIT(int) -
Method in class org.apache.hadoop.hbase.master.MetricsAssignmentManagerSourceImpl
-
- setRITCountOverThreshold(int) -
Method in interface org.apache.hadoop.hbase.master.MetricsAssignmentManagerSource
- Set the count of the number of regions that have been in transition over the threshold time.
- setRITCountOverThreshold(int) -
Method in class org.apache.hadoop.hbase.master.MetricsAssignmentManagerSourceImpl
-
- setRITOldestAge(long) -
Method in interface org.apache.hadoop.hbase.master.MetricsAssignmentManagerSource
- Set the oldest region in transition.
- setRITOldestAge(long) -
Method in class org.apache.hadoop.hbase.master.MetricsAssignmentManagerSourceImpl
-
- setRootDir(Configuration, Path) -
Static method in class org.apache.hadoop.hbase.util.FSUtils
-
- setRootIndexSizeKB(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder
optional uint32 root_index_size_KB = 12;
- setRootIndexSizeKB(int) -
Method in class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel.Node.Region
-
- setRootIndexSizeKB(int) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder
optional int32 rootIndexSizeKB = 9;
- setRootRegion(boolean) -
Method in class org.apache.hadoop.hbase.HTableDescriptor
- INTERNAL Used to denote if the current table represents
-ROOT- region.
- setRow(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder
optional bytes row = 1;
- setRow(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.CellProtos.KeyValue.Builder
required bytes row = 1;
- setRow(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition.Builder
required bytes row = 1;
- setRow(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall.Builder
required bytes row = 1;
- setRow(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.Builder
required bytes row = 1;
- setRow(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Builder
optional bytes row = 1;
- setRow(byte[], int, short) -
Method in class org.apache.hadoop.hbase.regionserver.ScanQueryMatcher
- Set current row
- setRow(ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder
optional bytes row = 1;
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.BatchMutation
-
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.BatchMutation
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.atomicIncrement_args
- row to increment
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.atomicIncrement_args
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAll_args
- Row to update
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAll_args
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllRow_args
- key of the row to be completely deleted.
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllRow_args
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllRowTs_args
- key of the row to be completely deleted.
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllRowTs_args
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllTs_args
- Row to update
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllTs_args
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.get_args
- row key
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.get_args
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRegionInfo_args
- row key
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRegionInfo_args
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRow_args
- row key
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRow_args
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowOrBefore_args
- row key
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowOrBefore_args
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowTs_args
- row key
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowTs_args
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumns_args
- row key
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumns_args
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumnsTs_args
- row key
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumnsTs_args
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVer_args
- row key
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVer_args
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVerTs_args
- row key
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVerTs_args
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRow_args
- row key
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRow_args
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowTs_args
- row key
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowTs_args
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.TIncrement
-
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.TIncrement
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRowResult
-
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRowResult
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TAppend
-
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TAppend
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TDelete
-
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TDelete
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TGet
-
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TGet
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndDelete_args
- row to check
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndDelete_args
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndPut_args
- row to check
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndPut_args
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TIncrement
-
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TIncrement
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TPut
-
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TPut
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TResult
-
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TResult
-
- setRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TRowMutations
-
- setRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TRowMutations
-
- setRowBatches(List<BatchMutation>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRows_args
- list of row batches
- setRowBatches(List<BatchMutation>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowsTs_args
- list of row batches
- setRowBatchesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRows_args
-
- setRowBatchesIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowsTs_args
-
- setRowBatchSize(int) -
Method in class org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.Builder
required uint32 rowBatchSize = 4;
- setRowFilter(Filter) -
Method in class org.apache.hadoop.hbase.mapred.TableInputFormatBase
- Deprecated. Allows subclasses to set the
Filter to be used.
- setRowFilter(Filter) -
Method in class org.apache.hadoop.hbase.mapred.TableRecordReader
- Deprecated.
- setRowFilter(Filter) -
Method in class org.apache.hadoop.hbase.mapred.TableRecordReaderImpl
- Deprecated.
- setRowFilter(String) -
Method in class org.apache.hadoop.hbase.regionserver.wal.HLogPrettyPrinter
- sets the row key by which output will be filtered
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.BatchMutation
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.atomicIncrement_args
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAll_args
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllRow_args
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllRowTs_args
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllTs_args
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.get_args
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRegionInfo_args
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRow_args
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowOrBefore_args
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowTs_args
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumns_args
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumnsTs_args
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVer_args
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVerTs_args
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRow_args
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowTs_args
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TIncrement
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRowResult
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TAppend
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TDelete
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TGet
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndDelete_args
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndPut_args
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TIncrement
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TPut
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TResult
-
- setRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TRowMutations
-
- setRowMutations(TRowMutations) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.mutateRow_args
- mutations to apply
- setRowMutationsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.mutateRow_args
-
- setRowOffsetPerColumnFamily(int) -
Method in class org.apache.hadoop.hbase.client.Get
- Set offset for the row per Column Family.
- setRowOffsetPerColumnFamily(int) -
Method in class org.apache.hadoop.hbase.client.Scan
- Set offset for the row per Column Family.
- setRowProcessorClassName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.ProcessRequest.Builder
required string row_processor_class_name = 1;
- setRowProcessorClassNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.ProcessRequest.Builder
required string row_processor_class_name = 1;
- setRowProcessorInitializerMessage(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.ProcessRequest.Builder
optional bytes row_processor_initializer_message = 3;
- setRowProcessorInitializerMessageName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.ProcessRequest.Builder
optional string row_processor_initializer_message_name = 2;
- setRowProcessorInitializerMessageNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.ProcessRequest.Builder
optional string row_processor_initializer_message_name = 2;
- setRowProcessorResult(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.ProcessResponse.Builder
required bytes row_processor_result = 1;
- setRows(int, CellSetMessage.CellSet.Row) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Builder
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
- setRows(int, CellSetMessage.CellSet.Row.Builder) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Builder
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.CellSet.Row rows = 1;
- setRows(List<ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRows_args
- row keys
- setRows(List<ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsTs_args
- row keys
- setRows(List<ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumns_args
- row keys
- setRows(List<ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumnsTs_args
- row keys
- setRowsDeleted(long) -
Method in class org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse.Builder
required uint64 rowsDeleted = 1;
- setRowsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRows_args
-
- setRowsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsTs_args
-
- setRowsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumns_args
-
- setRowsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumnsTs_args
-
- setRowTreeDepth(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setRPC(String, Object[], long) -
Method in interface org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler
-
- setRPC(String, Object[], long) -
Method in class org.apache.hadoop.hbase.monitoring.MonitoredRPCHandlerImpl
- Tells this instance that it is monitoring a new RPC call.
- setRpcControllerFactory(RpcControllerFactory) -
Method in class org.apache.hadoop.hbase.client.ClientSmallReversedScanner
-
- setRpcControllerFactory(RpcControllerFactory) -
Method in class org.apache.hadoop.hbase.client.ClientSmallScanner
-
- setRPCPacket(Message) -
Method in interface org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler
-
- setRPCPacket(Message) -
Method in class org.apache.hadoop.hbase.monitoring.MonitoredRPCHandlerImpl
- Gives this instance a reference to the protobuf received by the RPC, so
that it can later compute its size if asked for it.
- setRpcRetryingCaller(RpcRetryingCaller<Result[]>) -
Method in class org.apache.hadoop.hbase.client.ClientSmallReversedScanner
-
- setRpcRetryingCaller(RpcRetryingCaller<Result[]>) -
Method in class org.apache.hadoop.hbase.client.ClientSmallScanner
-
- setRpcTimeout(int) -
Static method in class org.apache.hadoop.hbase.ipc.RpcClient
-
- setRpcVersion(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.Builder
optional uint32 rpc_version = 2;
- setRpcVersion(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.Builder
optional uint32 rpc_version = 2;
- setRunner(Runnable) -
Method in class org.apache.hadoop.hbase.client.DelayingRunner
-
- setScan(ClientProtos.Scan) -
Method in class org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.Builder
required .Scan scan = 1;
- setScan(ClientProtos.Scan.Builder) -
Method in class org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.Builder
required .Scan scan = 1;
- setScan(Scan) -
Method in class org.apache.hadoop.hbase.mapreduce.TableInputFormatBase
- Sets the scan defining the actual details like columns etc.
- setScan(Scan) -
Method in class org.apache.hadoop.hbase.mapreduce.TableRecordReader
- Sets the scan defining the actual details like columns etc.
- setScan(Scan) -
Method in class org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl
- Sets the scan defining the actual details like columns etc.
- setScan(ClientProtos.Scan) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateRequest.Builder
required .Scan scan = 2;
- setScan(ClientProtos.Scan.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateRequest.Builder
required .Scan scan = 2;
- setScan(ClientProtos.Scan) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest.Builder
optional .Scan scan = 2;
- setScan(ClientProtos.Scan.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest.Builder
optional .Scan scan = 2;
- setScan(TScan) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithScan_args
- Scan instance
- setScan(TScan) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerResults_args
- the scan object to get a Scanner for
- setScan(TScan) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.openScanner_args
- the scan object to get a Scanner for
- setScanIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithScan_args
-
- setScanIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerResults_args
-
- setScanIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.openScanner_args
-
- setScannerCaching(int) -
Method in class org.apache.hadoop.hbase.client.HTable
- Deprecated. Use
Scan.setCaching(int)
- setScannerCaching(JobConf, int) -
Static method in class org.apache.hadoop.hbase.mapred.TableMapReduceUtil
- Deprecated. Sets the number of rows to return and cache with each scanner iteration.
- setScannerCaching(Job, int) -
Static method in class org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil
- Sets the number of rows to return and cache with each scanner iteration.
- setScannerCallableFactory(ClientSmallScanner.SmallScannerCallableFactory) -
Method in class org.apache.hadoop.hbase.client.ClientSmallReversedScanner
-
- setScannerCallableFactory(ClientSmallScanner.SmallScannerCallableFactory) -
Method in class org.apache.hadoop.hbase.client.ClientSmallScanner
-
- setScannerId(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest.Builder
optional uint64 scanner_id = 3;
- setScannerId(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse.Builder
optional uint64 scanner_id = 2;
- setScannerId(int) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.closeScanner_args
- the Id of the Scanner to close
- setScannerId(int) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerRows_args
- the Id of the Scanner to return rows from.
- setScannerIdIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.closeScanner_args
-
- setScannerIdIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerRows_args
-
- setScanQueryMatcher(ScanQueryMatcher) -
Method in class org.apache.hadoop.hbase.regionserver.StoreFileScanner
-
- setScanResult(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.Builder
optional int32 scan_result = 1;
- setScans(List<Scan>) -
Method in class org.apache.hadoop.hbase.mapreduce.MultiTableInputFormatBase
- Allows subclasses to set the list of
Scan objects.
- setScope(int) -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
-
- setScopes(int, WALProtos.FamilyScope) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder
repeated .FamilyScope scopes = 6;
- setScopes(int, WALProtos.FamilyScope.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder
repeated .FamilyScope scopes = 6;
- setScopes(NavigableMap<byte[], Integer>) -
Method in class org.apache.hadoop.hbase.regionserver.wal.HLogKey
-
- setScopeType(WALProtos.ScopeType) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.FamilyScope.Builder
required .ScopeType scope_type = 2;
- setSecond(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder
required bytes second = 2;
- setSecond(T2) -
Method in class org.apache.hadoop.hbase.util.Pair
- Replace the second element of the pair.
- setSecond(B) -
Method in class org.apache.hadoop.hbase.util.Triple
-
- setSecondPart(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse.Builder
optional bytes second_part = 2;
- setSecretManager(SecretManager<? extends TokenIdentifier>) -
Method in class org.apache.hadoop.hbase.ipc.RpcServer
-
- setSequenceFilter(long) -
Method in class org.apache.hadoop.hbase.regionserver.wal.HLogPrettyPrinter
- sets the sequence number by which output will be filtered
- setSequenceId(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder
required uint64 sequence_id = 2;
- setSequenceID(long) -
Method in class org.apache.hadoop.hbase.regionserver.StoreFile.Reader
-
- setSequenceNumber(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.TokenIdentifier.Builder
optional int64 sequence_number = 6;
- setSerializedComparator(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.Comparator.Builder
optional bytes serialized_comparator = 2;
- setSerializedFilter(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder
optional bytes serialized_filter = 2;
- setServer(HBaseProtos.ServerName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder
required .ServerName server = 1;
- setServer(HBaseProtos.ServerName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder
required .ServerName server = 1;
- setServer(HBaseProtos.ServerName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest.Builder
required .ServerName server = 1;
- setServer(HBaseProtos.ServerName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest.Builder
required .ServerName server = 1;
- setServer(HBaseProtos.ServerName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest.Builder
required .ServerName server = 1;
- setServer(HBaseProtos.ServerName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest.Builder
required .ServerName server = 1;
- setServer(HBaseProtos.ServerName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest.Builder
required .ServerName server = 1;
- setServer(HBaseProtos.ServerName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest.Builder
required .ServerName server = 1;
- setServer(HBaseProtos.ServerName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.Builder
required .ServerName server = 1;
- setServer(HBaseProtos.ServerName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.Builder
required .ServerName server = 1;
- setServerCurrentTime(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest.Builder
required uint64 server_current_time = 3;
- setServerHasMoreResults(boolean) -
Method in class org.apache.hadoop.hbase.client.RegionServerCallable
-
- setServerInfo(AdminProtos.ServerInfo) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.Builder
required .ServerInfo server_info = 1;
- setServerInfo(AdminProtos.ServerInfo.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.Builder
required .ServerInfo server_info = 1;
- setServerLoad(ClusterStatusProtos.ServerLoad) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder
required .ServerLoad server_load = 2;
- setServerLoad(ClusterStatusProtos.ServerLoad.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder
required .ServerLoad server_load = 2;
- setServerManager(ServerManager) -
Method in class org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl.ImplData
-
- setServerManager(ServerManager) -
Method in class org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl
-
- setServerName(HBaseProtos.ServerName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo.Builder
required .ServerName server_name = 1;
- setServerName(HBaseProtos.ServerName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo.Builder
required .ServerName server_name = 1;
- setServerName(HBaseProtos.ServerName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.Builder
required .ServerName server_name = 4;
- setServerName(HBaseProtos.ServerName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.Builder
required .ServerName server_name = 4;
- setServerName(HBaseProtos.ServerName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.Builder
required .ServerName server_name = 2;
- setServerName(HBaseProtos.ServerName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.Builder
required .ServerName server_name = 2;
- setServerName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRegionInfo
-
- setServerName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRegionInfo
-
- setServerNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRegionInfo
-
- setServers(List<ServerName>) -
Method in class org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl.ImplData
-
- setServers(List<ServerName>) -
Method in class org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl
-
- setServers(List<ServerName>) -
Method in class org.apache.hadoop.hbase.tmpl.master.RegionServerListTmpl.ImplData
-
- setServers(List<ServerName>) -
Method in class org.apache.hadoop.hbase.tmpl.master.RegionServerListTmpl
-
- setServerSideHConnectionRetries(Configuration, String, Log) -
Static method in class org.apache.hadoop.hbase.client.HConnectionManager
- Set the number of retries to use serverside when trying to communicate
with another server over
HConnection.
- setServerStartCode(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest.Builder
optional uint64 serverStartCode = 5;
- setServerStartCode(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.Builder
optional uint64 serverStartCode = 2;
- setServerStartCode(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest.Builder
required uint64 server_start_code = 2;
- setServerVersion(String) -
Method in class org.apache.hadoop.hbase.rest.model.VersionModel
-
- setServerVersion(String) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version.Builder
optional string serverVersion = 4;
- setServerVersionBytes(ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version.Builder
optional string serverVersion = 4;
- setService(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.Token.Builder
optional bytes service = 3;
- setService(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.DelegationToken.Builder
optional string service = 4;
- setServiceBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.DelegationToken.Builder
optional string service = 4;
- setServiceCall(ClientProtos.CoprocessorServiceCall) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Action.Builder
optional .CoprocessorServiceCall service_call = 4;
- setServiceCall(ClientProtos.CoprocessorServiceCall.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Action.Builder
optional .CoprocessorServiceCall service_call = 4;
- setServiceName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall.Builder
required string service_name = 2;
- setServiceName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader.Builder
optional string service_name = 2;
- setServiceNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall.Builder
required string service_name = 2;
- setServiceNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader.Builder
optional string service_name = 2;
- setServiceResult(ClientProtos.CoprocessorServiceResult) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException.Builder
optional .CoprocessorServiceResult service_result = 4;
- setServiceResult(ClientProtos.CoprocessorServiceResult.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException.Builder
optional .CoprocessorServiceResult service_result = 4;
- setSidelineBigOverlaps(boolean) -
Method in class org.apache.hadoop.hbase.util.HBaseFsck
-
- setSidelineDir(String) -
Method in class org.apache.hadoop.hbase.util.HBaseFsck
-
- setSignature(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder
required string signature = 1;
- setSignatureBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder
required string signature = 1;
- setSingleColumnValueFilter(FilterProtos.SingleColumnValueFilter) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.SingleColumnValueExcludeFilter.Builder
required .SingleColumnValueFilter single_column_value_filter = 1;
- setSingleColumnValueFilter(FilterProtos.SingleColumnValueFilter.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.SingleColumnValueExcludeFilter.Builder
required .SingleColumnValueFilter single_column_value_filter = 1;
- setSize(int) -
Method in class org.apache.hadoop.hbase.replication.ReplicationEndpoint.ReplicateContext
-
- setSizeOfLogQueue(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder
required uint32 sizeOfLogQueue = 3;
- setSizeOfLogQueue(int) -
Method in class org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationGlobalSourceSource
-
- setSizeOfLogQueue(int) -
Method in interface org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSource
-
- setSizeOfLogQueue(int) -
Method in class org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSourceImpl
-
- setSizeOfLogQueue(int) -
Method in class org.apache.hadoop.hbase.replication.regionserver.MetricsSource
- Set the size of the log queue
- setSkipChecks(boolean) -
Method in class org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo
-
- setSleepInterval(long) -
Method in class org.apache.hadoop.hbase.util.RetryCounter.RetryConfig
-
- setSlop(Configuration) -
Method in class org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer
-
- setSlop(Configuration) -
Method in class org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer
-
- setSmall(boolean) -
Method in class org.apache.hadoop.hbase.client.Scan
- Set whether this scan is a small scan
- setSmall(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder
optional bool small = 14;
- setSnapshot(HBaseProtos.SnapshotDescription) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest.Builder
required .SnapshotDescription snapshot = 1;
- setSnapshot(HBaseProtos.SnapshotDescription.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest.Builder
required .SnapshotDescription snapshot = 1;
- setSnapshot(HBaseProtos.ProcedureDescription) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.Builder
optional .ProcedureDescription snapshot = 2;
- setSnapshot(HBaseProtos.ProcedureDescription.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.Builder
optional .ProcedureDescription snapshot = 2;
- setSnapshot(HBaseProtos.SnapshotDescription) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest.Builder
optional .SnapshotDescription snapshot = 1;
- setSnapshot(HBaseProtos.SnapshotDescription.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest.Builder
optional .SnapshotDescription snapshot = 1;
- setSnapshot(HBaseProtos.SnapshotDescription) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.Builder
optional .SnapshotDescription snapshot = 1;
- setSnapshot(HBaseProtos.SnapshotDescription.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.Builder
optional .SnapshotDescription snapshot = 1;
- setSnapshot(HBaseProtos.SnapshotDescription) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.Builder
optional .SnapshotDescription snapshot = 2;
- setSnapshot(HBaseProtos.SnapshotDescription.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.Builder
optional .SnapshotDescription snapshot = 2;
- setSnapshot(HBaseProtos.SnapshotDescription) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.Builder
required .SnapshotDescription snapshot = 1;
- setSnapshot(HBaseProtos.SnapshotDescription.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.Builder
required .SnapshotDescription snapshot = 1;
- setSnapshot(HBaseProtos.SnapshotDescription) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest.Builder
required .SnapshotDescription snapshot = 1;
- setSnapshot(HBaseProtos.SnapshotDescription.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest.Builder
required .SnapshotDescription snapshot = 1;
- setSnapshotHandlerForTesting(TableName, SnapshotSentinel) -
Method in class org.apache.hadoop.hbase.master.snapshot.SnapshotManager
- Set the handler for the current snapshot
- setSnapshots(int, HBaseProtos.SnapshotDescription) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.Builder
repeated .SnapshotDescription snapshots = 1;
- setSnapshots(int, HBaseProtos.SnapshotDescription.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.Builder
repeated .SnapshotDescription snapshots = 1;
- setSocketSendBufSize(int) -
Method in class org.apache.hadoop.hbase.ipc.RpcServer
- Sets the socket buffer size used for responding to RPCs.
- setSocketSendBufSize(int) -
Method in interface org.apache.hadoop.hbase.ipc.RpcServerInterface
-
- setSocketTimeout(Configuration, int) -
Static method in class org.apache.hadoop.hbase.ipc.RpcClient
- Set the socket timeout
- setSortColumns(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TScan
-
- setSortColumnsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TScan
-
- setSortedColumns(List<TColumn>) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRowResult
-
- setSortedColumnsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRowResult
-
- setSortedPrefixes(int, ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.MultipleColumnPrefixFilter.Builder
repeated bytes sorted_prefixes = 1;
- setSource(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder
optional string source = 1;
- setSource(MetricsThriftServerSource) -
Method in class org.apache.hadoop.hbase.thrift.ThriftMetrics
-
- setSourceBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder
optional string source = 1;
- setSpec(HBaseProtos.RegionSpecifier) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder
required .RegionSpecifier spec = 1;
- setSpec(HBaseProtos.RegionSpecifier.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder
required .RegionSpecifier spec = 1;
- setSplit(boolean) -
Method in class org.apache.hadoop.hbase.HRegionInfo
-
- setSplit(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder
optional bool split = 6;
- setSplitkey(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Builder
required bytes splitkey = 1;
- setSplitKeys(int, ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest.Builder
repeated bytes split_keys = 2;
- setSplitPoint(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest.Builder
optional bytes split_point = 2;
- setSrcChecksum(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder
required string src_checksum = 6;
- setSrcChecksumBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder
required string src_checksum = 6;
- setStackTrace(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ExceptionResponse.Builder
optional string stack_trace = 2;
- setStackTraceBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ExceptionResponse.Builder
optional string stack_trace = 2;
- setStamp(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder
optional uint64 stamp = 3;
- setStartAndPrefix(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithPrefix_args
- the prefix (and thus start row) of the keys you want
- setStartAndPrefix(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithPrefix_args
-
- setStartAndPrefixIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithPrefix_args
-
- setStartCode(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder
optional uint64 start_code = 3;
- setStartCode(long) -
Method in class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel.Node
-
- setStartCode(long) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder
optional int64 startCode = 2;
- setStartDate(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.Builder
required string start_date = 1;
- setStartDateBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.Builder
required string start_date = 1;
- setStartKey(Configuration, byte[]) -
Static method in class org.apache.hadoop.hbase.mapreduce.SimpleTotalOrderPartitioner
-
- setStartKey(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder
optional bytes start_key = 3;
- setStartKey(byte[]) -
Method in class org.apache.hadoop.hbase.rest.model.TableRegionModel
-
- setStartKey(ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder
optional bytes startKey = 2;
- setStartKey(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRegionInfo
-
- setStartKey(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRegionInfo
-
- setStartKeyIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRegionInfo
-
- setStartRow(byte[]) -
Method in class org.apache.hadoop.hbase.client.Scan
- Set the start row of the scan.
- setStartRow(byte[]) -
Method in class org.apache.hadoop.hbase.mapred.TableRecordReader
- Deprecated.
- setStartRow(byte[]) -
Method in class org.apache.hadoop.hbase.mapred.TableRecordReaderImpl
- Deprecated.
- setStartRow(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder
optional bytes start_row = 3;
- setStartRow(byte[]) -
Method in class org.apache.hadoop.hbase.rest.model.ScannerModel
-
- setStartRow(ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner.Builder
optional bytes startRow = 1;
- setStartRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpen_args
- Starting row in table to scan.
- setStartRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpen_args
-
- setStartRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenTs_args
- Starting row in table to scan.
- setStartRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenTs_args
-
- setStartRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStop_args
- Starting row in table to scan.
- setStartRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStop_args
-
- setStartRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStopTs_args
- Starting row in table to scan.
- setStartRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStopTs_args
-
- setStartRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.TScan
-
- setStartRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.TScan
-
- setStartRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- setStartRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- setStartRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpen_args
-
- setStartRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenTs_args
-
- setStartRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStop_args
-
- setStartRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStopTs_args
-
- setStartRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TScan
-
- setStartRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- setStartTime(long) -
Method in class org.apache.hadoop.hbase.rest.model.ScannerModel
-
- setStartTime(long) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner.Builder
optional int64 startTime = 5;
- setStartTime(long) -
Method in class org.apache.hadoop.hbase.rest.RowSpec
-
- setState(ClusterStatusProtos.RegionState.State) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder
required .RegionState.State state = 2;
- setState(ClusterStatusProtos.RegionState.State) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.Builder
optional .RegionState.State state = 3;
- setState(ZooKeeperProtos.ReplicationState.State) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.Builder
required .ReplicationState.State state = 1;
- setState(ZooKeeperProtos.SplitLogTask.State) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.Builder
required .SplitLogTask.State state = 1;
- setState(ZooKeeperProtos.Table.State) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.Builder
required .Table.State state = 1 [default = ENABLED];
- setStatisticTracker(ServerStatisticTracker) -
Method in class org.apache.hadoop.hbase.client.RpcRetryingCallerFactory
- Set the tracker that should be used for tracking statistics about the server
- setStatus(String) -
Method in interface org.apache.hadoop.hbase.monitoring.MonitoredTask
-
- setStatus(int) -
Method in class org.apache.hadoop.hbase.rest.filter.GZIPResponseWrapper
-
- setStopRow(byte[]) -
Method in class org.apache.hadoop.hbase.client.Scan
- Set the stop row.
- setStopRow(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder
optional bytes stop_row = 4;
- setStopRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStop_args
- row to stop scanning on.
- setStopRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStop_args
-
- setStopRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStopTs_args
- row to stop scanning on.
- setStopRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStopTs_args
-
- setStopRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.TScan
-
- setStopRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.TScan
-
- setStopRow(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- setStopRow(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- setStopRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStop_args
-
- setStopRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStopTs_args
-
- setStopRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TScan
-
- setStopRowIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- setStopRowKey(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.InclusiveStopFilter.Builder
optional bytes stop_row_key = 1;
- setStoreFile(int, String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse.Builder
repeated string store_file = 1;
- setStorefileIndexSizeMB(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder
optional uint32 storefile_index_size_MB = 7;
- setStorefileIndexSizeMB(int) -
Method in class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel.Node.Region
-
- setStorefileIndexSizeMB(int) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder
optional int32 storefileIndexSizeMB = 6;
- setStorefiles(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder
optional uint32 storefiles = 3;
- setStoreFiles(int, SnapshotProtos.SnapshotRegionManifest.StoreFile) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.Builder
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
- setStoreFiles(int, SnapshotProtos.SnapshotRegionManifest.StoreFile.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.Builder
repeated .SnapshotRegionManifest.StoreFile store_files = 2;
- setStorefiles(int) -
Method in class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel.Node.Region
-
- setStorefiles(int) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder
optional int32 storefiles = 3;
- setStorefileSizeMB(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder
optional uint32 storefile_size_MB = 5;
- setStorefileSizeMB(int) -
Method in class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel.Node.Region
-
- setStorefileSizeMB(int) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder
optional int32 storefileSizeMB = 4;
- setStoreHomeDir(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor.Builder
required string store_home_dir = 6;
- setStoreHomeDirBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor.Builder
required string store_home_dir = 6;
- setStoreLimit(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.Builder
optional uint32 store_limit = 8;
- setStoreLimit(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder
optional uint32 store_limit = 11;
- setStoreOffset(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.Builder
optional uint32 store_offset = 9;
- setStoreOffset(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder
optional uint32 store_offset = 12;
- setStores(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder
optional uint32 stores = 2;
- setStores(int) -
Method in class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel.Node.Region
-
- setStores(int) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder
optional int32 stores = 2;
- setStoreSequenceId(int, ZooKeeperProtos.StoreSequenceId) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds.Builder
repeated .StoreSequenceId store_sequence_id = 2;
- setStoreSequenceId(int, ZooKeeperProtos.StoreSequenceId.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds.Builder
repeated .StoreSequenceId store_sequence_id = 2;
- setStoreUncompressedSizeMB(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder
optional uint32 store_uncompressed_size_MB = 4;
- setSubstr(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.SubstringComparator.Builder
required string substr = 1;
- setSubstrBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.SubstringComparator.Builder
required string substr = 1;
- setSuccess(long) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.atomicIncrement_result
-
- setSuccess(List<TCell>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.get_result
-
- setSuccess(Map<ByteBuffer, ColumnDescriptor>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getColumnDescriptors_result
-
- setSuccess(TRegionInfo) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRegionInfo_result
-
- setSuccess(List<TRowResult>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRow_result
-
- setSuccess(List<TCell>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowOrBefore_result
-
- setSuccess(List<TRowResult>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRows_result
-
- setSuccess(List<TRowResult>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsTs_result
-
- setSuccess(List<TRowResult>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumns_result
-
- setSuccess(List<TRowResult>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumnsTs_result
-
- setSuccess(List<TRowResult>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowTs_result
-
- setSuccess(List<TRowResult>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumns_result
-
- setSuccess(List<TRowResult>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumnsTs_result
-
- setSuccess(List<ByteBuffer>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getTableNames_result
-
- setSuccess(List<TRegionInfo>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getTableRegions_result
-
- setSuccess(List<TCell>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVer_result
-
- setSuccess(List<TCell>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVerTs_result
-
- setSuccess(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.isTableEnabled_result
-
- setSuccess(List<TRowResult>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerGet_result
-
- setSuccess(List<TRowResult>) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerGetList_result
-
- setSuccess(int) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpen_result
-
- setSuccess(int) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenTs_result
-
- setSuccess(int) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithPrefix_result
-
- setSuccess(int) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithScan_result
-
- setSuccess(int) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStop_result
-
- setSuccess(int) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStopTs_result
-
- setSuccess(TResult) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.append_result
-
- setSuccess(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndDelete_result
-
- setSuccess(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndPut_result
-
- setSuccess(List<TDelete>) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.deleteMultiple_result
-
- setSuccess(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.exists_result
-
- setSuccess(TResult) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.get_result
-
- setSuccess(List<TResult>) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getMultiple_result
-
- setSuccess(List<TResult>) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerResults_result
-
- setSuccess(List<TResult>) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerRows_result
-
- setSuccess(TResult) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.increment_result
-
- setSuccess(int) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.openScanner_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.atomicIncrement_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.get_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getColumnDescriptors_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRegionInfo_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRow_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowOrBefore_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRows_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsTs_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumns_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumnsTs_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowTs_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumns_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumnsTs_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getTableNames_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getTableRegions_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVer_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVerTs_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.isTableEnabled_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerGet_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerGetList_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpen_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenTs_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithPrefix_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithScan_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStop_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStopTs_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.append_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndDelete_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndPut_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.deleteMultiple_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.exists_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.get_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getMultiple_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerResults_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerRows_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.increment_result
-
- setSuccessIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.openScanner_result
-
- setSuffixLengthVariance(int) -
Method in class org.apache.hadoop.hbase.util.test.RedundantKVGenerator
-
- setSynchronous(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest.Builder
optional bool synchronous = 2;
- setTable(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder
optional string table = 2;
- setTable(HBaseProtos.TableSchema) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit.Builder
optional .TableSchema table = 3;
- setTable(HBaseProtos.TableSchema.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit.Builder
optional .TableSchema table = 3;
- setTable(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.TIncrement
-
- setTable(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.TIncrement
-
- setTable(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.append_args
- the table to append the value on
- setTable(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.append_args
-
- setTable(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndDelete_args
- to check in and delete from
- setTable(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndDelete_args
-
- setTable(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndPut_args
- to check in and put to
- setTable(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndPut_args
-
- setTable(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.deleteMultiple_args
- the table to delete from
- setTable(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.deleteMultiple_args
-
- setTable(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.deleteSingle_args
- the table to delete from
- setTable(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.deleteSingle_args
-
- setTable(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.exists_args
- the table to check on
- setTable(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.exists_args
-
- setTable(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.get_args
- the table to get from
- setTable(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.get_args
-
- setTable(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getMultiple_args
- the table to get from
- setTable(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getMultiple_args
-
- setTable(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerResults_args
- the table to get the Scanner for
- setTable(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerResults_args
-
- setTable(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.increment_args
- the table to increment the value on
- setTable(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.increment_args
-
- setTable(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.mutateRow_args
- table to apply the mutations
- setTable(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.mutateRow_args
-
- setTable(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.openScanner_args
- the table to get the Scanner for
- setTable(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.openScanner_args
-
- setTable(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.put_args
- the table to put data in
- setTable(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.put_args
-
- setTable(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.putMultiple_args
- the table to put data in
- setTable(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.putMultiple_args
-
- setTableBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder
optional string table = 2;
- setTableGroupPermissions(String, TableName, List<TablePermission>) -
Method in class org.apache.hadoop.hbase.security.access.TableAuthManager
- Overwrites the existing permission set for a group and triggers an update
for zookeeper synchronization.
- setTableInfo(HBaseFsck.TableInfo) -
Method in interface org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler
- Set the TableInfo used by all HRegionInfos fabricated by other callbacks
- setTableInfo(HBaseFsck.TableInfo) -
Method in class org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl
- Set the TableInfo used by all HRegionInfos fabricated by other callbacks
- setTableIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TIncrement
-
- setTableIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.append_args
-
- setTableIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndDelete_args
-
- setTableIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndPut_args
-
- setTableIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.deleteMultiple_args
-
- setTableIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.deleteSingle_args
-
- setTableIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.exists_args
-
- setTableIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.get_args
-
- setTableIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getMultiple_args
-
- setTableIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerResults_args
-
- setTableIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.increment_args
-
- setTableIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.mutateRow_args
-
- setTableIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.openScanner_args
-
- setTableIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.put_args
-
- setTableIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.putMultiple_args
-
- setTableName(HBaseProtos.TableName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.GetUserPermissionsRequest.Builder
optional .TableName table_name = 2;
- setTableName(HBaseProtos.TableName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.GetUserPermissionsRequest.Builder
optional .TableName table_name = 2;
- setTableName(HBaseProtos.TableName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.TablePermission.Builder
optional .TableName table_name = 1;
- setTableName(HBaseProtos.TableName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.TablePermission.Builder
optional .TableName table_name = 1;
- setTableName(HBaseProtos.TableName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder
required .TableName table_name = 2;
- setTableName(HBaseProtos.TableName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder
required .TableName table_name = 2;
- setTableName(HBaseProtos.TableName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder
optional .TableName table_name = 1;
- setTableName(HBaseProtos.TableName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder
optional .TableName table_name = 1;
- setTableName(HBaseProtos.TableName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest.Builder
required .TableName table_name = 1;
- setTableName(HBaseProtos.TableName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest.Builder
required .TableName table_name = 1;
- setTableName(HBaseProtos.TableName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest.Builder
required .TableName table_name = 1;
- setTableName(HBaseProtos.TableName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest.Builder
required .TableName table_name = 1;
- setTableName(HBaseProtos.TableName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest.Builder
required .TableName table_name = 1;
- setTableName(HBaseProtos.TableName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest.Builder
required .TableName table_name = 1;
- setTableName(HBaseProtos.TableName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest.Builder
required .TableName table_name = 1;
- setTableName(HBaseProtos.TableName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest.Builder
required .TableName table_name = 1;
- setTableName(HBaseProtos.TableName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest.Builder
required .TableName table_name = 1;
- setTableName(HBaseProtos.TableName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest.Builder
required .TableName table_name = 1;
- setTableName(HBaseProtos.TableName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest.Builder
required .TableName table_name = 1;
- setTableName(HBaseProtos.TableName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest.Builder
required .TableName table_name = 1;
- setTableName(int, HBaseProtos.TableName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.Builder
repeated .TableName tableName = 1;
- setTableName(int, HBaseProtos.TableName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.Builder
repeated .TableName tableName = 1;
- setTableName(HBaseProtos.TableName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest.Builder
required .TableName table_name = 1;
- setTableName(HBaseProtos.TableName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest.Builder
required .TableName table_name = 1;
- setTableName(HBaseProtos.TableName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest.Builder
required .TableName table_name = 1;
- setTableName(HBaseProtos.TableName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest.Builder
required .TableName table_name = 1;
- setTableName(HBaseProtos.TableName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest.Builder
required .TableName tableName = 1;
- setTableName(HBaseProtos.TableName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest.Builder
required .TableName tableName = 1;
- setTableName(HBaseProtos.TableName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadRequest.Builder
required .TableName table_name = 1;
- setTableName(HBaseProtos.TableName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadRequest.Builder
required .TableName table_name = 1;
- setTableName(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor.Builder
required bytes table_name = 1;
- setTableName(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder
required bytes table_name = 2;
- setTableName(HBaseProtos.TableName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.Builder
optional .TableName table_name = 1;
- setTableName(HBaseProtos.TableName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.Builder
optional .TableName table_name = 1;
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.atomicIncrement_args
- name of table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.atomicIncrement_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.createTable_args
- name of table to create
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.createTable_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAll_args
- name of table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAll_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllRow_args
- name of table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllRow_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllRowTs_args
- name of table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllRowTs_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllTs_args
- name of table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllTs_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteTable_args
- name of table to delete
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteTable_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.disableTable_args
- name of the table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.disableTable_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.enableTable_args
- name of the table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.enableTable_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.get_args
- name of table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.get_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getColumnDescriptors_args
- table name
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getColumnDescriptors_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRow_args
- name of table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRow_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowOrBefore_args
- name of table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowOrBefore_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRows_args
- name of table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRows_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsTs_args
- name of the table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsTs_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumns_args
- name of table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumns_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumnsTs_args
- name of table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumnsTs_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowTs_args
- name of the table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowTs_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumns_args
- name of table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumns_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumnsTs_args
- name of table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumnsTs_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getTableRegions_args
- table name
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getTableRegions_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVer_args
- name of table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVer_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVerTs_args
- name of table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVerTs_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.isTableEnabled_args
- name of the table to check
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.isTableEnabled_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRow_args
- name of table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRow_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRows_args
- name of table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRows_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowsTs_args
- name of table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowsTs_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowTs_args
- name of table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowTs_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpen_args
- name of table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpen_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenTs_args
- name of table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenTs_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithPrefix_args
- name of table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithPrefix_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithScan_args
- name of table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithScan_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStop_args
- name of table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStop_args
-
- setTableName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStopTs_args
- name of table
- setTableName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStopTs_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.atomicIncrement_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.createTable_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAll_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllRow_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllRowTs_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllTs_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteTable_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.disableTable_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.enableTable_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.get_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getColumnDescriptors_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRow_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowOrBefore_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRows_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsTs_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumns_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumnsTs_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowTs_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumns_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumnsTs_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getTableRegions_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVer_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVerTs_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.isTableEnabled_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRow_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRows_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowsTs_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowTs_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpen_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenTs_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithPrefix_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithScan_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStop_args
-
- setTableNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStopTs_args
-
- setTableNameOrRegionName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.compact_args
-
- setTableNameOrRegionName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.compact_args
-
- setTableNameOrRegionName(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.majorCompact_args
-
- setTableNameOrRegionName(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.majorCompact_args
-
- setTableNameOrRegionNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.compact_args
-
- setTableNameOrRegionNameIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.majorCompact_args
-
- setTableNames(int, HBaseProtos.TableName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest.Builder
repeated .TableName table_names = 1;
- setTableNames(int, HBaseProtos.TableName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest.Builder
repeated .TableName table_names = 1;
- setTableNames(int, HBaseProtos.TableName) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesResponse.Builder
repeated .TableName table_names = 1;
- setTableNames(int, HBaseProtos.TableName.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesResponse.Builder
repeated .TableName table_names = 1;
- setTablePermission(AccessControlProtos.TablePermission) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.Permission.Builder
optional .TablePermission table_permission = 4;
- setTablePermission(AccessControlProtos.TablePermission.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.Permission.Builder
optional .TablePermission table_permission = 4;
- setTableRecordReader(TableRecordReader) -
Method in class org.apache.hadoop.hbase.mapred.TableInputFormatBase
- Deprecated. Allows subclasses to set the
TableRecordReader.
- setTableRecordReader(TableRecordReader) -
Method in class org.apache.hadoop.hbase.mapreduce.MultiTableInputFormatBase
- Allows subclasses to set the
TableRecordReader.
- setTableRecordReader(TableRecordReader) -
Method in class org.apache.hadoop.hbase.mapreduce.TableInputFormatBase
- Allows subclasses to set the
TableRecordReader.
- setTables(List<TableModel>) -
Method in class org.apache.hadoop.hbase.rest.model.TableListModel
-
- setTableSchema(HBaseProtos.TableSchema) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest.Builder
required .TableSchema table_schema = 1;
- setTableSchema(HBaseProtos.TableSchema.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest.Builder
required .TableSchema table_schema = 1;
- setTableSchema(int, HBaseProtos.TableSchema) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse.Builder
repeated .TableSchema table_schema = 1;
- setTableSchema(int, HBaseProtos.TableSchema.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse.Builder
repeated .TableSchema table_schema = 1;
- setTableSchema(int, HBaseProtos.TableSchema) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.Builder
repeated .TableSchema tableSchema = 1;
- setTableSchema(int, HBaseProtos.TableSchema.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.Builder
repeated .TableSchema tableSchema = 1;
- setTableSchema(HBaseProtos.TableSchema) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest.Builder
required .TableSchema table_schema = 2;
- setTableSchema(HBaseProtos.TableSchema.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest.Builder
required .TableSchema table_schema = 2;
- setTableSchema(HBaseProtos.TableSchema) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest.Builder
required .TableSchema table_schema = 1;
- setTableSchema(HBaseProtos.TableSchema.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest.Builder
required .TableSchema table_schema = 1;
- setTableUserPermissions(String, TableName, List<TablePermission>) -
Method in class org.apache.hadoop.hbase.security.access.TableAuthManager
- Overwrites the existing permission set for a given user for a table, and
triggers an update for zookeeper synchronization.
- setTagCompressionContext(TagCompressionContext) -
Method in class org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext
-
- setTagCompressionContext(TagCompressionContext) -
Method in class org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext
-
- setTags(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder
optional bytes tags = 7;
- setTags(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.CellProtos.KeyValue.Builder
optional bytes tags = 7;
- setTags(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue.Builder
optional bytes tags = 5;
- setTags(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumnValue
-
- setTags(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumnValue
-
- setTagsIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumnValue
-
- setTagsOffsetWidth(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setTargetTableName(String[]) -
Method in class org.apache.hadoop.hbase.master.RegionPlacementMaintainer
-
- setTaskMonitor(TaskMonitor) -
Method in class org.apache.hadoop.hbase.tmpl.common.TaskMonitorTmpl.ImplData
-
- setTaskMonitor(TaskMonitor) -
Method in class org.apache.hadoop.hbase.tmpl.common.TaskMonitorTmpl
-
- setThird(C) -
Method in class org.apache.hadoop.hbase.util.Triple
-
- setThreadId(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.Builder
optional int64 thread_id = 3;
- setTickTime(int) -
Method in class org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster
-
- setTimeLag(long) -
Method in class org.apache.hadoop.hbase.util.HBaseFsck
- We are interested in only those tables that have not changed their state in
hbase:meta during the last few seconds specified by hbase.admin.fsck.timelag
- setTimeRange(long, long) -
Method in class org.apache.hadoop.hbase.client.Get
- Get versions of columns only within the specified timestamp range,
[minStamp, maxStamp).
- setTimeRange(long, long) -
Method in class org.apache.hadoop.hbase.client.Increment
- Sets the TimeRange to be used on the Get for this increment.
- setTimeRange(long, long) -
Method in class org.apache.hadoop.hbase.client.Scan
- Get versions of columns only within the specified timestamp range,
[minStamp, maxStamp).
- setTimeRange(HBaseProtos.TimeRange) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.Builder
optional .TimeRange time_range = 5;
- setTimeRange(HBaseProtos.TimeRange.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.Builder
optional .TimeRange time_range = 5;
- setTimeRange(HBaseProtos.TimeRange) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Builder
optional .TimeRange time_range = 7;
- setTimeRange(HBaseProtos.TimeRange.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Builder
optional .TimeRange time_range = 7;
- setTimeRange(HBaseProtos.TimeRange) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder
optional .TimeRange time_range = 6;
- setTimeRange(HBaseProtos.TimeRange.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder
optional .TimeRange time_range = 6;
- setTimeRange(TTimeRange) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TGet
-
- setTimeRange(TTimeRange) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- setTimeRangeIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TGet
-
- setTimeRangeIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- setTimeRangeTracker(TimeRangeTracker) -
Method in class org.apache.hadoop.hbase.regionserver.StoreFile.Writer
- Set TimeRangeTracker
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.client.Delete
- Set the timestamp of the delete.
- setTimeStamp(long) -
Method in class org.apache.hadoop.hbase.client.Get
- Get versions of columns with the specified timestamp.
- setTimeStamp(long) -
Method in class org.apache.hadoop.hbase.client.Scan
- Get versions of columns with the specified timestamp.
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.Builder
optional uint64 timestamp = 3;
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.master.RegionState
-
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder
optional uint64 timestamp = 4;
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.CellProtos.KeyValue.Builder
optional uint64 timestamp = 4;
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Builder
optional uint64 timestamp = 4;
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue.Builder
optional uint64 timestamp = 3;
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.rest.model.CellModel
-
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell.Builder
optional int64 timestamp = 3;
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllRowTs_args
- timestamp
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllTs_args
- timestamp
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsTs_args
- timestamp
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumnsTs_args
-
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowTs_args
- timestamp
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumnsTs_args
-
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVerTs_args
- timestamp
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowsTs_args
- timestamp
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowTs_args
- timestamp
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenTs_args
- timestamp
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStopTs_args
- timestamp
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.thrift.generated.TCell
-
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.thrift.generated.TScan
-
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumn
-
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumnValue
-
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TDelete
-
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TGet
-
- setTimestamp(long) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TPut
-
- setTimestampDeltaWidth(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setTimestampDiffSize(int) -
Method in class org.apache.hadoop.hbase.util.test.RedundantKVGenerator
-
- setTimestampFields(LongEncoder) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setTimestampIndexWidth(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setTimestampIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllRowTs_args
-
- setTimestampIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.deleteAllTs_args
-
- setTimestampIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsTs_args
-
- setTimestampIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumnsTs_args
-
- setTimestampIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowTs_args
-
- setTimestampIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumnsTs_args
-
- setTimestampIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVerTs_args
-
- setTimestampIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowsTs_args
-
- setTimestampIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.mutateRowTs_args
-
- setTimestampIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenTs_args
-
- setTimestampIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStopTs_args
-
- setTimestampIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TCell
-
- setTimestampIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TScan
-
- setTimestampIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumn
-
- setTimestampIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumnValue
-
- setTimestampIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TDelete
-
- setTimestampIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TGet
-
- setTimestampIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TPut
-
- setTimeStampOfLastShippedOp(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder
required uint64 timeStampOfLastShippedOp = 4;
- setTimestamps(int, long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.TimestampsFilter.Builder
repeated int64 timestamps = 1 [packed = true];
- setTimeStampsOfLastAppliedOp(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder
required uint64 timeStampsOfLastAppliedOp = 2;
- setTimeToLive(int) -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
-
- setTimeToLive(int) -
Method in class org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor
-
- setTimeToLiveIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor
-
- setTimeUnit(TimeUnit) -
Method in class org.apache.hadoop.hbase.util.RetryCounter.RetryConfig
-
- setTo(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder
optional uint64 to = 2;
- setToken(ByteRange) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenizerNode
-
- setToken(AuthenticationProtos.Token) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.GetAuthenticationTokenResponse.Builder
optional .Token token = 1;
- setToken(AuthenticationProtos.Token.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.GetAuthenticationTokenResponse.Builder
optional .Token token = 1;
- setTokenBytes(ByteRange) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.encode.column.ColumnNodeWriter
-
- setTokenOffset(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenizerNode
-
- setTotalCompactingKVs(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder
optional uint64 total_compacting_KVs = 10;
- setTotalCompactingKVs(long) -
Method in class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel.Node.Region
-
- setTotalCompactingKVs(long) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder
optional int64 totalCompactingKVs = 12;
- setTotalNumberOfRequests(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder
optional uint32 total_number_of_requests = 2;
- setTotalRegions(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse.Builder
optional uint32 total_regions = 2;
- setTotalStaticBloomSizeKB(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder
optional uint32 total_static_bloom_size_KB = 14;
- setTotalStaticBloomSizeKB(int) -
Method in class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel.Node.Region
-
- setTotalStaticBloomSizeKB(int) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder
optional int32 totalStaticBloomSizeKB = 11;
- setTotalStaticIndexSizeKB(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder
optional uint32 total_static_index_size_KB = 13;
- setTotalStaticIndexSizeKB(int) -
Method in class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel.Node.Region
-
- setTotalStaticIndexSizeKB(int) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder
optional int32 totalStaticIndexSizeKB = 10;
- setTotalUncompressedBytes(long) -
Method in class org.apache.hadoop.hbase.io.hfile.FixedFileTrailer
-
- setTotalUncompressedBytes(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HFileProtos.FileTrailerProto.Builder
optional uint64 total_uncompressed_bytes = 4;
- setTrace(int, ErrorHandlingProtos.StackTraceElementMessage) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.GenericExceptionMessage.Builder
repeated .StackTraceElementMessage trace = 4;
- setTrace(int, ErrorHandlingProtos.StackTraceElementMessage.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.GenericExceptionMessage.Builder
repeated .StackTraceElementMessage trace = 4;
- setTraceId(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.TracingProtos.RPCTInfo.Builder
optional int64 trace_id = 1;
- setTraceInfo(TracingProtos.RPCTInfo) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader.Builder
optional .RPCTInfo trace_info = 2;
- setTraceInfo(TracingProtos.RPCTInfo.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader.Builder
optional .RPCTInfo trace_info = 2;
- setTransition(int, RegionServerStatusProtos.RegionStateTransition) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest.Builder
repeated .RegionStateTransition transition = 2;
- setTransition(int, RegionServerStatusProtos.RegionStateTransition.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest.Builder
repeated .RegionStateTransition transition = 2;
- setTransitionCode(RegionServerStatusProtos.RegionStateTransition.TransitionCode) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.Builder
required .RegionStateTransition.TransitionCode transition_code = 1;
- setTransitionInZK(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest.Builder
optional bool transition_in_ZK = 3 [default = true];
- setTTL(long) -
Method in class org.apache.hadoop.hbase.client.Delete
-
- setTTL(long) -
Method in class org.apache.hadoop.hbase.client.Mutation
- Set the TTL desired for the result of the mutation, in milliseconds.
- setTtl(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse.Builder
optional uint32 ttl = 4;
- setTtl(int) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder
optional int32 ttl = 3;
- setType(AccessControlProtos.Permission.Type) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.GetUserPermissionsRequest.Builder
optional .Permission.Type type = 1;
- setType(AccessControlProtos.Permission.Type) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.Permission.Builder
required .Permission.Type type = 1;
- setType(HBaseProtos.RegionSpecifier.RegionSpecifierType) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder
required .RegionSpecifier.RegionSpecifierType type = 1;
- setType(HBaseProtos.SnapshotDescription.Type) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder
optional .SnapshotDescription.Type type = 4 [default = FLUSH];
- setType(SnapshotProtos.SnapshotFileInfo.Type) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Builder
required .SnapshotFileInfo.Type type = 1;
- setUncaughtExceptionHandler(Thread.UncaughtExceptionHandler) -
Method in class org.apache.hadoop.hbase.util.HasThread
-
- setUncompressedDataIndexSize(long) -
Method in class org.apache.hadoop.hbase.io.hfile.FixedFileTrailer
-
- setUncompressedDataIndexSize(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HFileProtos.FileTrailerProto.Builder
optional uint64 uncompressed_data_index_size = 3;
- setup(Mapper<ImmutableBytesWritable, Result, ImmutableBytesWritable, Mutation>.Context) -
Method in class org.apache.hadoop.hbase.mapreduce.Import.Importer
-
- setup(Mapper<ImmutableBytesWritable, Result, ImmutableBytesWritable, KeyValue>.Context) -
Method in class org.apache.hadoop.hbase.mapreduce.Import.KeyValueImporter
-
- setup(Mapper<ImmutableBytesWritable, Result, ImmutableBytesWritable, Put>.Context) -
Method in class org.apache.hadoop.hbase.mapreduce.IndexBuilder.Map
-
- setup(Reducer<ImmutableBytesWritable, Text, ImmutableBytesWritable, KeyValue>.Context) -
Method in class org.apache.hadoop.hbase.mapreduce.TextSortReducer
- Handles initializing this class with objects specific to it (i.e., the parser).
- setup(Mapper<LongWritable, Text, ImmutableBytesWritable, Put>.Context) -
Method in class org.apache.hadoop.hbase.mapreduce.TsvImporterMapper
- Handles initializing this class with objects specific to it (i.e., the parser).
- setup(Mapper<LongWritable, Text, ImmutableBytesWritable, Text>.Context) -
Method in class org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper
- Handles initializing this class with objects specific to it (i.e., the parser).
- setup(RegionServerServices) -
Method in interface org.apache.hadoop.hbase.regionserver.compactions.CompactionThroughputController
- Setup controller for the given region server.
- setup(RegionServerServices) -
Method in class org.apache.hadoop.hbase.regionserver.compactions.NoLimitCompactionThroughputController
-
- setup(RegionServerServices) -
Method in class org.apache.hadoop.hbase.regionserver.compactions.PressureAwareCompactionThroughputController
-
- setupConnection() -
Method in class org.apache.hadoop.hbase.ipc.RpcClient.Connection
-
- setUpdateInfo(int, AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.Builder
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
- setUpdateInfo(int, AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.Builder
repeated .UpdateFavoredNodesRequest.RegionUpdateInfo update_info = 1;
- setupIOstreams() -
Method in class org.apache.hadoop.hbase.ipc.RpcClient.Connection
-
- setupJob(JobContext) -
Method in class org.apache.hadoop.hbase.mapreduce.TableOutputCommitter
-
- setupShortCircuitRead(Configuration) -
Static method in class org.apache.hadoop.hbase.util.FSUtils
- Do our short circuit read setup.
- setupTask(TaskAttemptContext) -
Method in class org.apache.hadoop.hbase.mapreduce.TableOutputCommitter
-
- setUrl(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder
required string url = 2;
- setUrlBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder
required string url = 2;
- setUsedHeapMB(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder
optional uint32 used_heap_MB = 3;
- setUser(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UserPermission.Builder
required bytes user = 1;
- setUser(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.UserPermissions.Builder
required bytes user = 1;
- setUser(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder
required string user = 4;
- setUser(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.GetAuthsRequest.Builder
required bytes user = 1;
- setUser(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.GetAuthsResponse.Builder
required bytes user = 1;
- setUser(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.SetAuthsRequest.Builder
required bytes user = 1;
- setUser(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.UserAuthorizations.Builder
required bytes user = 1;
- setUserAuths(int, VisibilityLabelsProtos.UserAuthorizations) -
Method in class org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.MultiUserAuthorizations.Builder
repeated .UserAuthorizations userAuths = 1;
- setUserAuths(int, VisibilityLabelsProtos.UserAuthorizations.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.MultiUserAuthorizations.Builder
repeated .UserAuthorizations userAuths = 1;
- setUserBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder
required string user = 4;
- setUserInfo(RPCProtos.UserInformation) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader.Builder
optional .UserInformation user_info = 1;
- setUserInfo(RPCProtos.UserInformation.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader.Builder
optional .UserInformation user_info = 1;
- setUsername(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.TokenIdentifier.Builder
required bytes username = 2;
- setUsername(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.WhoAmIResponse.Builder
optional string username = 1;
- setUsernameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.WhoAmIResponse.Builder
optional string username = 1;
- setUserPermission(int, AccessControlProtos.UserPermission) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.GetUserPermissionsResponse.Builder
repeated .UserPermission user_permission = 1;
- setUserPermission(int, AccessControlProtos.UserPermission.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.GetUserPermissionsResponse.Builder
repeated .UserPermission user_permission = 1;
- setUserPermission(AccessControlProtos.UserPermission) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.GrantRequest.Builder
required .UserPermission user_permission = 1;
- setUserPermission(AccessControlProtos.UserPermission.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.GrantRequest.Builder
required .UserPermission user_permission = 1;
- setUserPermission(AccessControlProtos.UserPermission) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.RevokeRequest.Builder
required .UserPermission user_permission = 1;
- setUserPermission(AccessControlProtos.UserPermission.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.RevokeRequest.Builder
required .UserPermission user_permission = 1;
- setUserPermissions(int, AccessControlProtos.UsersAndPermissions.UserPermissions) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.Builder
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
- setUserPermissions(int, AccessControlProtos.UsersAndPermissions.UserPermissions.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.Builder
repeated .UsersAndPermissions.UserPermissions user_permissions = 1;
- setUserProviderForTesting(Configuration, Class<? extends UserProvider>) -
Static method in class org.apache.hadoop.hbase.security.UserProvider
- Set the
UserProvider in the given configuration that should be instantiated
- setValue(byte[], byte[]) -
Method in class org.apache.hadoop.hbase.client.UnmodifyableHTableDescriptor
-
- setValue(String, String) -
Method in class org.apache.hadoop.hbase.client.UnmodifyableHTableDescriptor
-
- setValue(byte[], byte[]) -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
-
- setValue(String, String) -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
-
- setValue(byte[], byte[]) -
Method in class org.apache.hadoop.hbase.HTableDescriptor
- Setter for storing metadata as a (key, value) pair in
HTableDescriptor.values map
- setValue(ImmutableBytesWritable, ImmutableBytesWritable) -
Method in class org.apache.hadoop.hbase.HTableDescriptor
-
- setValue(String, String) -
Method in class org.apache.hadoop.hbase.HTableDescriptor
- Setter for storing metadata as a (key, value) pair in
HTableDescriptor.values map
- setValue(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder
optional bytes value = 6;
- setValue(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.CellProtos.KeyValue.Builder
optional bytes value = 6;
- setValue(HBaseProtos.NameBytesPair) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.Builder
required .NameBytesPair value = 2;
- setValue(HBaseProtos.NameBytesPair.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.Builder
required .NameBytesPair value = 2;
- setValue(HBaseProtos.NameBytesPair) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResult.Builder
optional .NameBytesPair value = 1;
- setValue(HBaseProtos.NameBytesPair.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResult.Builder
optional .NameBytesPair value = 1;
- setValue(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue.Builder
optional bytes value = 2;
- setValue(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.ByteArrayComparable.Builder
optional bytes value = 1;
- setValue(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.Builder
optional bytes value = 2;
- setValue(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.Builder
optional int64 value = 2;
- setValue(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder
required string value = 2;
- setValue(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder
required bytes value = 2;
- setValue(boolean) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.Builder
required bool value = 1;
- setValue(V) -
Method in class org.apache.hadoop.hbase.regionserver.LruHashMap.Entry
- Set the value of this entry.
- setValue(byte[]) -
Method in class org.apache.hadoop.hbase.rest.model.CellModel
-
- setValue(String) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder
required string value = 2;
- setValue(String) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder
required string value = 2;
- setValue(long) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.atomicIncrement_args
- amount to increment by
- setValue(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.Mutation
-
- setValue(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.Mutation
-
- setValue(byte[]) -
Method in class org.apache.hadoop.hbase.thrift.generated.TCell
-
- setValue(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift.generated.TCell
-
- setValue(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumnValue
-
- setValue(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumnValue
-
- setValue(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndDelete_args
- the expected value, if not provided the
check is for the non-existence of the
column in question
- setValue(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndDelete_args
-
- setValue(byte[]) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndPut_args
- the expected value, if not provided the
check is for the non-existence of the
column in question
- setValue(ByteBuffer) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndPut_args
-
- setValue(long) -
Method in class org.apache.hadoop.hbase.util.ManualEnvironmentEdge
-
- setValueBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder
required string value = 2;
- setValueBytes(ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder
required string value = 2;
- setValueBytes(ByteString) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder
required string value = 2;
- setValueIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Hbase.atomicIncrement_args
-
- setValueIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Mutation
-
- setValueIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TCell
-
- setValueIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TColumnValue
-
- setValueIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndDelete_args
-
- setValueIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndPut_args
-
- setValueLength(int) -
Method in class org.apache.hadoop.hbase.util.test.RedundantKVGenerator
-
- setValueLengthWidth(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setValueOffsetWidth(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setValues(int, CellMessage.Cell) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
- setValues(int, CellMessage.Cell.Builder) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet.Row.Builder
repeated .org.apache.hadoop.hbase.rest.protobuf.generated.Cell values = 2;
- setVersion(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
-
- setVersion(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.Builder
required string version = 1;
- setVersion(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder
optional int32 version = 5;
- setVersion(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder
required string version = 1;
- setVersion(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder
optional int32 version = 1;
- setVersion(String) -
Method in class org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel
-
- setVersion(byte) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRegionInfo
-
- setVersion(FileSystem, Path) -
Static method in class org.apache.hadoop.hbase.util.FSUtils
- Sets version of file system
- setVersion(FileSystem, Path, int, int) -
Static method in class org.apache.hadoop.hbase.util.FSUtils
- Sets version of file system
- setVersion(FileSystem, Path, String, int, int) -
Static method in class org.apache.hadoop.hbase.util.FSUtils
- Sets version of file system
- setVersionBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.Builder
required string version = 1;
- setVersionBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder
required string version = 1;
- setVersionInfo(RPCProtos.VersionInfo) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader.Builder
optional .VersionInfo version_info = 5;
- setVersionInfo(RPCProtos.VersionInfo.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader.Builder
optional .VersionInfo version_info = 5;
- setVersionIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.TRegionInfo
-
- setVersionOfClosingNode(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest.Builder
optional uint32 version_of_closing_node = 2;
- setVersionOfOfflineNode(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder
optional uint32 version_of_offline_node = 2;
- setVersions(int, int) -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
- Set minimum and maximum versions to keep
- setVersionsDeleted(long) -
Method in class org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse.Builder
optional uint64 versionsDeleted = 2;
- setVictimCache(BucketCache) -
Method in class org.apache.hadoop.hbase.io.hfile.LruBlockCache
-
- setVisLabel(int, VisibilityLabelsProtos.VisibilityLabel) -
Method in class org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsRequest.Builder
repeated .VisibilityLabel visLabel = 1;
- setVisLabel(int, VisibilityLabelsProtos.VisibilityLabel.Builder) -
Method in class org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsRequest.Builder
repeated .VisibilityLabel visLabel = 1;
- setWalEdit(int, WALEdit) -
Method in class org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress
- Sets the walEdit for the operation(Mutation) at the specified position.
- setWalName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Builder
optional string wal_name = 5;
- setWalNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Builder
optional string wal_name = 5;
- setWalServer(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Builder
optional string wal_server = 4;
- setWalServerBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Builder
optional string wal_server = 4;
- setWALTrailer(WALProtos.WALTrailer) -
Method in interface org.apache.hadoop.hbase.regionserver.wal.HLog.Writer
- Sets HLog's WALTrailer.
- setWALTrailer(WALProtos.WALTrailer) -
Method in class org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter
-
- setWatchIfNodeExists(ZooKeeperWatcher, String) -
Static method in class org.apache.hadoop.hbase.zookeeper.ZKUtil
- Watch the specified znode, but only if exists.
- setWebuiPort(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo.Builder
optional uint32 webui_port = 2;
- setWriteBufferSize(long) -
Method in class org.apache.hadoop.hbase.client.HTable
- Sets the size of the buffer in bytes.
- setWriteBufferSize(long) -
Method in interface org.apache.hadoop.hbase.client.HTableInterface
- Sets the size of the buffer in bytes.
- setWriteBufferSize(long) -
Method in class org.apache.hadoop.hbase.rest.client.RemoteHTable
-
- setWriterClsName(String) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALHeader.Builder
optional string writer_cls_name = 4;
- setWriterClsNameBytes(ByteString) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALHeader.Builder
optional string writer_cls_name = 4;
- setWriteRequestsCount(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder
optional uint64 write_requests_count = 9;
- setWriteRequestsCount(long) -
Method in class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel.Node.Region
-
- setWriteRequestsCount(long) -
Method in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder
optional int64 writeRequestsCount = 8;
- setWriteTime(long) -
Method in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder
required uint64 write_time = 4;
- setWriteToWAL(boolean) -
Method in class org.apache.hadoop.hbase.client.Mutation
- Deprecated. Use
Mutation.setDurability(Durability) instead.
- setWriteToWAL(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Mutation
-
- setWriteToWALIsSet(boolean) -
Method in class org.apache.hadoop.hbase.thrift.generated.Mutation
-
- setYetToUpdateRegions(int) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse.Builder
optional uint32 yet_to_update_regions = 1;
- SEVENTY_FIFTH_PERCENTILE_METRIC_NAME -
Static variable in class org.apache.hadoop.hbase.metrics.histogram.MetricsHistogram
- Deprecated.
- SEVENTY_FIFTH_PERCENTILE_METRIC_NAME -
Static variable in interface org.apache.hadoop.metrics2.MetricHistogram
-
- shallowCopy() -
Method in class org.apache.hadoop.hbase.KeyValue
- Creates a shallow copy of this KeyValue, reusing the data byte buffer.
- shallowCopy() -
Method in interface org.apache.hadoop.hbase.util.ByteRange
- Create a new
ByteRange that points at this range's byte[].
- shallowCopy() -
Method in interface org.apache.hadoop.hbase.util.PositionedByteRange
-
- shallowCopy() -
Method in class org.apache.hadoop.hbase.util.SimpleByteRange
-
- shallowCopy() -
Method in class org.apache.hadoop.hbase.util.SimplePositionedByteRange
-
- shallowCopySubRange(int, int) -
Method in interface org.apache.hadoop.hbase.util.ByteRange
- Create a new
ByteRange that points at this range's byte[].
- shallowCopySubRange(int, int) -
Method in interface org.apache.hadoop.hbase.util.PositionedByteRange
-
- shallowCopySubRange(int, int) -
Method in class org.apache.hadoop.hbase.util.SimpleByteRange
-
- shallowCopySubRange(int, int) -
Method in class org.apache.hadoop.hbase.util.SimplePositionedByteRange
-
- shipBatch(long, int) -
Method in class org.apache.hadoop.hbase.replication.regionserver.MetricsSource
- Convenience method to apply changes to metrics due to shipping a batch of logs.
- shipEdits(boolean, List<HLog.Entry>) -
Method in class org.apache.hadoop.hbase.replication.regionserver.ReplicationSource
- Do the shipping logic
- shouldArchiveTable(String) -
Method in class org.apache.hadoop.hbase.backup.example.HFileArchiveTableMonitor
- Determine if the given table should or should not allow its hfiles to be deleted in the archive
- shouldBypass() -
Method in class org.apache.hadoop.hbase.coprocessor.ObserverContext
- For use by the coprocessor framework.
- shouldCacheBlockOnRead(BlockType.BlockCategory) -
Method in class org.apache.hadoop.hbase.io.hfile.CacheConfig
- Should we cache a block of a particular category? We always cache
important blocks such as index blocks, as long as the block cache is
available.
- shouldCacheBloomsOnWrite() -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
-
- shouldCacheBloomsOnWrite() -
Method in class org.apache.hadoop.hbase.io.hfile.CacheConfig
-
- shouldCacheCompressed(BlockType.BlockCategory) -
Method in class org.apache.hadoop.hbase.io.hfile.CacheConfig
-
- shouldCacheDataCompressed() -
Method in class org.apache.hadoop.hbase.io.hfile.CacheConfig
-
- shouldCacheDataOnRead() -
Method in class org.apache.hadoop.hbase.io.hfile.CacheConfig
- Returns whether the blocks of this HFile should be cached on read or not.
- shouldCacheDataOnWrite() -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
-
- shouldCacheDataOnWrite() -
Method in class org.apache.hadoop.hbase.io.hfile.CacheConfig
-
- shouldCacheIndexesOnWrite() -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
-
- shouldCacheIndexesOnWrite() -
Method in class org.apache.hadoop.hbase.io.hfile.CacheConfig
-
- shouldCloseConnection -
Variable in class org.apache.hadoop.hbase.ipc.RpcClient.Connection
-
- shouldComplete() -
Method in class org.apache.hadoop.hbase.coprocessor.ObserverContext
- For use by the coprocessor framework.
- shouldCompressTags() -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
-
- shouldEvictBlocksOnClose() -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
-
- shouldEvictOnClose() -
Method in class org.apache.hadoop.hbase.io.hfile.CacheConfig
-
- shouldFixVersionFile() -
Method in class org.apache.hadoop.hbase.util.HBaseFsck
-
- shouldIgnorePreCheckPermission() -
Method in class org.apache.hadoop.hbase.util.HBaseFsck
-
- shouldIncludeMemstoreTS() -
Method in class org.apache.hadoop.hbase.io.hfile.HFileReaderV2
-
- shouldPrefetchBlocksOnOpen() -
Method in class org.apache.hadoop.hbase.HColumnDescriptor
-
- shouldPrefetchOnOpen() -
Method in class org.apache.hadoop.hbase.io.hfile.CacheConfig
-
- shouldRetry() -
Method in class org.apache.hadoop.hbase.util.RetryCounter
-
- shouldSeek(Scan, long) -
Method in class org.apache.hadoop.hbase.regionserver.MemStore
- Check if this memstore may contain the required keys
- shouldSidelineBigOverlaps() -
Method in class org.apache.hadoop.hbase.util.HBaseFsck
-
- shouldSplit() -
Method in class org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy
-
- shouldSplit() -
Method in class org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy
-
- shouldSplit() -
Method in class org.apache.hadoop.hbase.regionserver.IncreasingToUpperBoundRegionSplitPolicy
-
- shouldSplit() -
Method in class org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
-
- shouldSplitHlog -
Variable in class org.apache.hadoop.hbase.master.handler.ServerShutdownHandler
-
- shouldUseHBaseChecksum() -
Method in class org.apache.hadoop.hbase.io.FSDataInputStreamWrapper
-
- shouldUseScanner(Scan, SortedSet<byte[]>, long) -
Method in interface org.apache.hadoop.hbase.regionserver.KeyValueScanner
- Allows to filter out scanners (both StoreFile and memstore) that we don't
want to use based on criteria such as Bloom filters and timestamp ranges.
- shouldUseScanner(Scan, SortedSet<byte[]>, long) -
Method in class org.apache.hadoop.hbase.regionserver.MemStore.MemStoreScanner
-
- shouldUseScanner(Scan, SortedSet<byte[]>, long) -
Method in class org.apache.hadoop.hbase.regionserver.NonLazyKeyValueScanner
-
- shouldUseScanner(Scan, SortedSet<byte[]>, long) -
Method in class org.apache.hadoop.hbase.regionserver.StoreFileScanner
-
- shouldWriteBlock(boolean) -
Method in class org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexWriter
- Whether there is an inline block ready to be written.
- shouldWriteBlock(boolean) -
Method in interface org.apache.hadoop.hbase.io.hfile.InlineBlockWriter
- Determines whether there is a new block to be written out.
- shouldWriteBlock(boolean) -
Method in class org.apache.hadoop.hbase.util.CompoundBloomFilterWriter
-
- shutdown() -
Method in class org.apache.hadoop.hbase.client.HBaseAdmin
- Shuts down the HBase cluster
- shutdown() -
Method in class org.apache.hadoop.hbase.coprocessor.CoprocessorHost.Environment
- Clean up the environment
- shutdown(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.coprocessor.CoprocessorHost
-
- shutdown() -
Method in class org.apache.hadoop.hbase.executor.ExecutorService
-
- shutdown() -
Method in interface org.apache.hadoop.hbase.io.hfile.BlockCache
- Shutdown the cache.
- shutdown() -
Method in class org.apache.hadoop.hbase.io.hfile.bucket.BucketCache
-
- shutdown() -
Method in class org.apache.hadoop.hbase.io.hfile.bucket.ByteBufferIOEngine
- No operation for the shutdown in the memory IO engine
- shutdown() -
Method in class org.apache.hadoop.hbase.io.hfile.bucket.FileIOEngine
- Close the file
- shutdown() -
Method in interface org.apache.hadoop.hbase.io.hfile.bucket.IOEngine
- Shutdown the IOEngine
- shutdown() -
Method in class org.apache.hadoop.hbase.io.hfile.CombinedBlockCache
-
- shutdown() -
Method in class org.apache.hadoop.hbase.io.hfile.DoubleBlockCache
- Deprecated.
- shutdown() -
Method in class org.apache.hadoop.hbase.io.hfile.LruBlockCache
-
- shutdown() -
Method in class org.apache.hadoop.hbase.io.hfile.slab.SingleSizeCache
- Deprecated.
- shutdown() -
Method in class org.apache.hadoop.hbase.io.hfile.slab.SlabCache
- Deprecated. Sends a shutdown to all SingleSizeCaches contained by this cache.
- shutdown() -
Method in class org.apache.hadoop.hbase.LocalHBaseCluster
- Shut down the mini HBase cluster
- shutdown() -
Method in class org.apache.hadoop.hbase.master.AssignmentManager
- Shutdown the threadpool executor service
- shutdown() -
Method in class org.apache.hadoop.hbase.master.HMaster
-
- shutdown(RpcController, MasterProtos.ShutdownRequest) -
Method in class org.apache.hadoop.hbase.master.HMaster
-
- shutdown(RpcController, MasterProtos.ShutdownRequest) -
Method in interface org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService.BlockingInterface
-
- shutdown(RpcController, MasterProtos.ShutdownRequest, RpcCallback<MasterProtos.ShutdownResponse>) -
Method in interface org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService.Interface
rpc Shutdown(.ShutdownRequest) returns (.ShutdownResponse);
- shutdown(RpcController, MasterProtos.ShutdownRequest, RpcCallback<MasterProtos.ShutdownResponse>) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService
rpc Shutdown(.ShutdownRequest) returns (.ShutdownResponse);
- shutdown(RpcController, MasterProtos.ShutdownRequest, RpcCallback<MasterProtos.ShutdownResponse>) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService.Stub
-
- shutdown() -
Method in class org.apache.hadoop.hbase.rest.client.Client
- Shut down the client.
- shutdown() -
Method in class org.apache.hadoop.hbase.thrift.ThriftServerRunner
-
- shutdown(List<JVMClusterUtil.MasterThread>, List<JVMClusterUtil.RegionServerThread>) -
Static method in class org.apache.hadoop.hbase.util.JVMClusterUtil
-
- shutdown() -
Method in class org.apache.hadoop.hbase.util.MetaUtils
- Closes catalog regions if open.
- shutdown(Thread) -
Static method in class org.apache.hadoop.hbase.util.Threads
- Shutdown passed thread using isAlive and join.
- shutdown(Thread, long) -
Static method in class org.apache.hadoop.hbase.util.Threads
- Shutdown passed thread using isAlive and join.
- shutdown() -
Method in class org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster
-
- shutdownCluster() -
Method in class org.apache.hadoop.hbase.master.ServerManager
-
- ShutdownHook - Class in org.apache.hadoop.hbase.regionserver
- Manage regionserver shutdown hooks.
- ShutdownHook() -
Constructor for class org.apache.hadoop.hbase.regionserver.ShutdownHook
-
- ShutdownHookManager - Class in org.apache.hadoop.hbase.util
- This class provides ShutdownHookManager shims for HBase to interact with the Hadoop 1.0.x and the
Hadoop 2.0+ series.
- ShutdownHookManager() -
Constructor for class org.apache.hadoop.hbase.util.ShutdownHookManager
-
- SIGNATURE_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription
-
- SimpleByteRange - Class in org.apache.hadoop.hbase.util
- A basic
ByteRange implementation. - SimpleByteRange() -
Constructor for class org.apache.hadoop.hbase.util.SimpleByteRange
- Create a new
ByteRange lacking a backing array and with an
undefined viewport.
- SimpleByteRange(int) -
Constructor for class org.apache.hadoop.hbase.util.SimpleByteRange
- Create a new
ByteRange over a new backing array of size
capacity.
- SimpleByteRange(byte[]) -
Constructor for class org.apache.hadoop.hbase.util.SimpleByteRange
- Create a new
ByteRange over the provided bytes.
- SimpleByteRange(byte[], int, int) -
Constructor for class org.apache.hadoop.hbase.util.SimpleByteRange
- Create a new
ByteRange over the provided bytes.
- SimpleLoadBalancer - Class in org.apache.hadoop.hbase.master.balancer
- Makes decisions about the placement and movement of Regions across
RegionServers.
- SimpleLoadBalancer() -
Constructor for class org.apache.hadoop.hbase.master.balancer.SimpleLoadBalancer
-
- SimplePositionedByteRange - Class in org.apache.hadoop.hbase.util
- Extends the basic
SimpleByteRange implementation with position
support. - SimplePositionedByteRange() -
Constructor for class org.apache.hadoop.hbase.util.SimplePositionedByteRange
- Create a new
PositionedByteRange lacking a backing array and with
an undefined viewport.
- SimplePositionedByteRange(int) -
Constructor for class org.apache.hadoop.hbase.util.SimplePositionedByteRange
- Create a new
PositionedByteRange over a new backing array of
size capacity.
- SimplePositionedByteRange(byte[]) -
Constructor for class org.apache.hadoop.hbase.util.SimplePositionedByteRange
- Create a new
PositionedByteRange over the provided bytes.
- SimplePositionedByteRange(byte[], int, int) -
Constructor for class org.apache.hadoop.hbase.util.SimplePositionedByteRange
- Create a new
PositionedByteRange over the provided bytes.
- SimpleRpcScheduler - Class in org.apache.hadoop.hbase.ipc
- A scheduler that maintains isolated handler pools for general, high-priority and replication
requests.
- SimpleRpcScheduler(Configuration, int, int, int, PriorityFunction, Abortable, int) -
Constructor for class org.apache.hadoop.hbase.ipc.SimpleRpcScheduler
-
- SimpleRpcSchedulerFactory - Class in org.apache.hadoop.hbase.regionserver
- Constructs a
SimpleRpcScheduler. - SimpleRpcSchedulerFactory() -
Constructor for class org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory
-
- SimpleScanLabelGenerator - Class in org.apache.hadoop.hbase.security.visibility
- This is a simple implementation for ScanLabelGenerator.
- SimpleScanLabelGenerator() -
Constructor for class org.apache.hadoop.hbase.security.visibility.SimpleScanLabelGenerator
-
- SimpleTotalOrderPartitioner<VALUE> - Class in org.apache.hadoop.hbase.mapreduce
- A partitioner that takes start and end keys and uses bigdecimal to figure
which reduce a key belongs to.
- SimpleTotalOrderPartitioner() -
Constructor for class org.apache.hadoop.hbase.mapreduce.SimpleTotalOrderPartitioner
-
- SINGLE_COLUMN_VALUE_FILTER_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.SingleColumnValueExcludeFilter
-
- SINGLE_QUOTE -
Static variable in class org.apache.hadoop.hbase.filter.ParseConstants
- ASCII code for a single quote
- SingleColumnValueExcludeFilter - Class in org.apache.hadoop.hbase.filter
- A
Filter that checks a single column value, but does not emit the
tested column. - SingleColumnValueExcludeFilter(byte[], byte[], CompareFilter.CompareOp, byte[]) -
Constructor for class org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter
- Constructor for binary compare of the value of a single column.
- SingleColumnValueExcludeFilter(byte[], byte[], CompareFilter.CompareOp, ByteArrayComparable) -
Constructor for class org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter
- Constructor for binary compare of the value of a single column.
- SingleColumnValueExcludeFilter(byte[], byte[], CompareFilter.CompareOp, ByteArrayComparable, boolean, boolean) -
Constructor for class org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter
- Constructor for protobuf deserialization only.
- SingleColumnValueFilter - Class in org.apache.hadoop.hbase.filter
- This filter is used to filter cells based on value.
- SingleColumnValueFilter(byte[], byte[], CompareFilter.CompareOp, byte[]) -
Constructor for class org.apache.hadoop.hbase.filter.SingleColumnValueFilter
- Constructor for binary compare of the value of a single column.
- SingleColumnValueFilter(byte[], byte[], CompareFilter.CompareOp, ByteArrayComparable) -
Constructor for class org.apache.hadoop.hbase.filter.SingleColumnValueFilter
- Constructor for binary compare of the value of a single column.
- SingleColumnValueFilter(byte[], byte[], CompareFilter.CompareOp, ByteArrayComparable, boolean, boolean) -
Constructor for class org.apache.hadoop.hbase.filter.SingleColumnValueFilter
- Constructor for protobuf deserialization only.
- SingleSizeCache - Class in org.apache.hadoop.hbase.io.hfile.slab
- Deprecated. As of 1.0, replaced by
BucketCache. - SingleSizeCache(int, int, SlabItemActionWatcher) -
Constructor for class org.apache.hadoop.hbase.io.hfile.slab.SingleSizeCache
- Deprecated. Default constructor.
- SingletonCoprocessorService - Interface in org.apache.hadoop.hbase.coprocessor
- Coprocessor endpoints registered once per server and providing protobuf services should implement
this interface and return the
Service instance via SingletonCoprocessorService.getService(). - sink -
Variable in class org.apache.hadoop.hbase.tool.Canary.Monitor
-
- SINK_AGE_OF_LAST_APPLIED_OP -
Static variable in interface org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSinkSource
-
- SINK_AGE_OF_LAST_APPLIED_OP -
Static variable in class org.apache.hadoop.hbase.replication.regionserver.MetricsSink
-
- SINK_APPLIED_BATCHES -
Static variable in interface org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSinkSource
-
- SINK_APPLIED_BATCHES -
Static variable in class org.apache.hadoop.hbase.replication.regionserver.MetricsSink
-
- SINK_APPLIED_OPS -
Static variable in interface org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSinkSource
-
- SINK_APPLIED_OPS -
Static variable in class org.apache.hadoop.hbase.replication.regionserver.MetricsSink
-
- sinkToString() -
Method in class org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad
- sinkToString
- size() -
Method in class org.apache.hadoop.hbase.client.MultiAction
- Get the total number of Actions
- size() -
Method in class org.apache.hadoop.hbase.client.MultiResponse
-
- size() -
Method in class org.apache.hadoop.hbase.client.Mutation
- Number of KeyValues carried by this Mutation.
- size() -
Method in class org.apache.hadoop.hbase.client.Result
-
- size() -
Method in class org.apache.hadoop.hbase.CompoundConfiguration
-
- size() -
Method in class org.apache.hadoop.hbase.io.ByteBufferOutputStream
-
- size() -
Method in interface org.apache.hadoop.hbase.io.hfile.BlockCache
- Returns the total size of the block cache, in bytes.
- size() -
Method in class org.apache.hadoop.hbase.io.hfile.bucket.BucketCache
-
- size() -
Method in class org.apache.hadoop.hbase.io.hfile.CombinedBlockCache
-
- size() -
Method in class org.apache.hadoop.hbase.io.hfile.DoubleBlockCache
- Deprecated.
- size() -
Method in class org.apache.hadoop.hbase.io.hfile.HFile.FileInfo
-
- size() -
Method in class org.apache.hadoop.hbase.io.hfile.LruBlockCache
-
- size() -
Method in class org.apache.hadoop.hbase.io.hfile.slab.SingleSizeCache
- Deprecated.
- size() -
Method in class org.apache.hadoop.hbase.io.hfile.slab.SlabCache
- Deprecated.
- size() -
Method in class org.apache.hadoop.hbase.master.DeadServer
-
- size() -
Method in class org.apache.hadoop.hbase.regionserver.KeyValueSkipListSet
-
- size() -
Method in class org.apache.hadoop.hbase.regionserver.LruHashMap
- Gets the size (number of entries) of the map.
- size() -
Method in class org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress
-
- size() -
Method in class org.apache.hadoop.hbase.regionserver.wal.WALEdit
-
- size() -
Method in class org.apache.hadoop.hbase.thrift.CallQueue
-
- size() -
Method in class org.apache.hadoop.hbase.util.BoundedArrayQueue
-
- size() -
Method in class org.apache.hadoop.hbase.util.BoundedConcurrentLinkedQueue
-
- size() -
Method in class org.apache.hadoop.hbase.util.byterange.ByteRangeSet
-
- size -
Variable in class org.apache.hadoop.hbase.util.ConcatenatedLists
-
- size() -
Method in class org.apache.hadoop.hbase.util.ConcatenatedLists
-
- size() -
Method in interface org.apache.hadoop.hbase.util.PoolMap.Pool
-
- size() -
Method in class org.apache.hadoop.hbase.util.PoolMap
-
- size(K) -
Method in class org.apache.hadoop.hbase.util.PoolMap
-
- size() -
Method in class org.apache.hadoop.hbase.util.SortedCopyOnWriteSet
-
- SIZE_TO_SPLIT_KEY -
Static variable in class org.apache.hadoop.hbase.regionserver.StripeStoreConfig
- The size the stripe should achieve to be considered for splitting into multiple stripes.
- SIZE_VALUE_NAME -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionSource
-
- SizedCellScanner - Interface in org.apache.hadoop.hbase.io
- A CellScanner that knows its size in memory in bytes.
- sizeIndex() -
Method in class org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator.Bucket
-
- sizeIndexOfAllocation(long) -
Method in class org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator
-
- SIZEOF_BOOLEAN -
Static variable in class org.apache.hadoop.hbase.util.Bytes
- Size of boolean in bytes
- SIZEOF_BYTE -
Static variable in class org.apache.hadoop.hbase.util.Bytes
- Size of byte in bytes
- SIZEOF_CHAR -
Static variable in class org.apache.hadoop.hbase.util.Bytes
- Size of char in bytes
- SIZEOF_DOUBLE -
Static variable in class org.apache.hadoop.hbase.util.Bytes
- Size of double in bytes
- SIZEOF_FLOAT -
Static variable in class org.apache.hadoop.hbase.util.Bytes
- Size of float in bytes
- SIZEOF_INT -
Static variable in class org.apache.hadoop.hbase.util.Bytes
- Size of int in bytes
- SIZEOF_LONG -
Static variable in class org.apache.hadoop.hbase.util.Bytes
- Size of long in bytes
- SIZEOF_SHORT -
Static variable in class org.apache.hadoop.hbase.util.Bytes
- Size of short in bytes
- sizeOfAllocation(long) -
Method in class org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator
-
- SIZEOFLOGQUEUE_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource
-
- skip(long) -
Method in class org.apache.hadoop.hbase.io.LimitInputStream
-
- skip(PositionedByteRange) -
Method in interface org.apache.hadoop.hbase.types.DataType
- Skip
src's position forward over one encoded value.
- skip(PositionedByteRange) -
Method in class org.apache.hadoop.hbase.types.FixedLengthWrapper
-
- skip(PositionedByteRange) -
Method in class org.apache.hadoop.hbase.types.OrderedBytesBase
-
- skip(PositionedByteRange) -
Method in class org.apache.hadoop.hbase.types.PBCell
-
- skip(PositionedByteRange) -
Method in class org.apache.hadoop.hbase.types.RawByte
-
- skip(PositionedByteRange) -
Method in class org.apache.hadoop.hbase.types.RawBytes
-
- skip(PositionedByteRange) -
Method in class org.apache.hadoop.hbase.types.RawDouble
-
- skip(PositionedByteRange) -
Method in class org.apache.hadoop.hbase.types.RawFloat
-
- skip(PositionedByteRange) -
Method in class org.apache.hadoop.hbase.types.RawInteger
-
- skip(PositionedByteRange) -
Method in class org.apache.hadoop.hbase.types.RawLong
-
- skip(PositionedByteRange) -
Method in class org.apache.hadoop.hbase.types.RawShort
-
- skip(PositionedByteRange) -
Method in class org.apache.hadoop.hbase.types.RawString
-
- skip(PositionedByteRange) -
Method in class org.apache.hadoop.hbase.types.Struct
-
- skip() -
Method in class org.apache.hadoop.hbase.types.StructIterator
- Bypass the next encoded value.
- skip(PositionedByteRange) -
Method in class org.apache.hadoop.hbase.types.TerminatedWrapper
- Skip
src's position forward over one encoded value.
- skip(ByteBuffer, int) -
Static method in class org.apache.hadoop.hbase.util.ByteBufferUtils
- Increment position in buffer.
- skip(PositionedByteRange) -
Static method in class org.apache.hadoop.hbase.util.OrderedBytes
- Skip
buff's position forward over one encoded value.
- SKIP_ARRAY -
Static variable in class org.apache.hadoop.hbase.filter.ParseConstants
- SKIP Array
- SKIP_BUFFER -
Static variable in class org.apache.hadoop.hbase.filter.ParseConstants
-
- SKIP_LINES_CONF_KEY -
Static variable in class org.apache.hadoop.hbase.mapreduce.ImportTsv
-
- SKIP_WAL_VALUE -
Static variable in enum org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Durability
SKIP_WAL = 1;
- SkipFilter - Class in org.apache.hadoop.hbase.filter
- A wrapper filter that filters an entire row if any of the Cell checks do
not pass.
- SkipFilter(Filter) -
Constructor for class org.apache.hadoop.hbase.filter.SkipFilter
-
- SKIPFLUSH_VALUE -
Static variable in enum org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type
SKIPFLUSH = 2;
- skipKVsNewerThanReadpoint() -
Method in class org.apache.hadoop.hbase.regionserver.StoreFileScanner
-
- skipSleepCycle() -
Method in class org.apache.hadoop.hbase.util.Sleeper
- If currently asleep, stops sleeping; if not asleep, will skip the next
sleep cycle.
- skipStoreFileRangeCheck() -
Method in class org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
- Deprecated. Use
RegionSplitPolicy.skipStoreFileRangeCheck(String) instead
- skipStoreFileRangeCheck(String) -
Method in class org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
- See
RegionSplitPolicy.skipStoreFileRangeCheck() javadoc.
- SlabCache - Class in org.apache.hadoop.hbase.io.hfile.slab
- Deprecated. As of 1.0, replaced by
BucketCache. - SlabCache(long, long) -
Constructor for class org.apache.hadoop.hbase.io.hfile.slab.SlabCache
- Deprecated. Default constructor, creates an empty SlabCache.
- sleep() -
Method in class org.apache.hadoop.hbase.Chore
- Sleep for period.
- sleep(long, int) -
Method in class org.apache.hadoop.hbase.client.DelegatingRetryingCallable
-
- sleep(long, int) -
Method in class org.apache.hadoop.hbase.client.RegionServerCallable
-
- sleep(long, int) -
Method in interface org.apache.hadoop.hbase.client.RetryingCallable
-
- sleep() -
Method in class org.apache.hadoop.hbase.util.Sleeper
- Sleep for period.
- sleep(long) -
Method in class org.apache.hadoop.hbase.util.Sleeper
- Sleep for period adjusted by passed
startTime
- sleep(long) -
Static method in class org.apache.hadoop.hbase.util.Threads
- If interrupted, just prints out the interrupt on STDOUT, resets interrupt and returns
- Sleeper - Class in org.apache.hadoop.hbase.util
- Sleeper for current thread.
- Sleeper(int, Stoppable) -
Constructor for class org.apache.hadoop.hbase.util.Sleeper
-
- sleepForRetries(String, int) -
Method in class org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint
- Do the sleeping logic
- sleepForRetries(String, int) -
Method in class org.apache.hadoop.hbase.replication.regionserver.ReplicationSource
- Do the sleeping logic
- sleepUntilNextRetry() -
Method in class org.apache.hadoop.hbase.util.RetryCounter
- Sleep for a back off time as supplied by the backoff policy, and increases the attempts
- sleepWithoutInterrupt(long) -
Static method in class org.apache.hadoop.hbase.util.Threads
- Sleeps for the given amount of time even if interrupted.
- slop -
Variable in class org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer
-
- SLOW_APPEND_COUNT -
Static variable in interface org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource
-
- SLOW_APPEND_COUNT_DESC -
Static variable in interface org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource
-
- SLOW_APPEND_DESC -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- SLOW_APPEND_KEY -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- SLOW_DELETE_DESC -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- SLOW_DELETE_KEY -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- SLOW_GET_DESC -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- SLOW_GET_KEY -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- SLOW_INCREMENT_DESC -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- SLOW_INCREMENT_KEY -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- SLOW_MUTATE_DESC -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- SLOW_MUTATE_KEY -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- SLOW_RESPONSE_NANO_SEC -
Static variable in class org.apache.hadoop.hbase.thrift.ThriftMetrics
-
- SLOW_THRIFT_CALL_KEY -
Static variable in interface org.apache.hadoop.hbase.thrift.MetricsThriftServerSource
-
- SMALL_COMPACTION_QUEUE_LENGTH -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- SMALL_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan
-
- snapshot(String, TableName) -
Method in class org.apache.hadoop.hbase.client.HBaseAdmin
- Take a snapshot for the given table.
- snapshot(String, String) -
Method in class org.apache.hadoop.hbase.client.HBaseAdmin
-
- snapshot(byte[], byte[], HBaseProtos.SnapshotDescription.Type) -
Method in class org.apache.hadoop.hbase.client.HBaseAdmin
- Create snapshot for the given table of given flush type.
- snapshot(byte[], TableName) -
Method in class org.apache.hadoop.hbase.client.HBaseAdmin
- Create a timestamp consistent snapshot for the given table.
- snapshot(byte[], byte[]) -
Method in class org.apache.hadoop.hbase.client.HBaseAdmin
-
- snapshot(String, TableName, HBaseProtos.SnapshotDescription.Type) -
Method in class org.apache.hadoop.hbase.client.HBaseAdmin
- Create typed snapshot of the table.
- snapshot(String, String, HBaseProtos.SnapshotDescription.Type) -
Method in class org.apache.hadoop.hbase.client.HBaseAdmin
-
- snapshot(String, byte[], HBaseProtos.SnapshotDescription.Type) -
Method in class org.apache.hadoop.hbase.client.HBaseAdmin
-
- snapshot(HBaseProtos.SnapshotDescription) -
Method in class org.apache.hadoop.hbase.client.HBaseAdmin
- Take a snapshot and wait for the server to complete that snapshot (blocking).
- snapshot(RpcController, MasterProtos.SnapshotRequest) -
Method in class org.apache.hadoop.hbase.master.HMaster
- Triggers an asynchronous attempt to take a snapshot.
- snapshot -
Variable in class org.apache.hadoop.hbase.master.snapshot.TakeSnapshotHandler
-
- snapshot(RpcController, MasterProtos.SnapshotRequest) -
Method in interface org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService.BlockingInterface
-
- snapshot(RpcController, MasterProtos.SnapshotRequest, RpcCallback<MasterProtos.SnapshotResponse>) -
Method in interface org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService.Interface
rpc Snapshot(.SnapshotRequest) returns (.SnapshotResponse);
- snapshot(RpcController, MasterProtos.SnapshotRequest, RpcCallback<MasterProtos.SnapshotResponse>) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService
rpc Snapshot(.SnapshotRequest) returns (.SnapshotResponse);
- snapshot(RpcController, MasterProtos.SnapshotRequest, RpcCallback<MasterProtos.SnapshotResponse>) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService.Stub
-
- snapshot(MetricsRecordBuilder, boolean) -
Method in class org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry
- Sample all the mutable metrics and put the snapshot in the builder
- snapshot(MetricsRecordBuilder, boolean) -
Method in class org.apache.hadoop.metrics2.lib.MetricMutableQuantiles
-
- snapshot(MetricsRecordBuilder, boolean) -
Method in class org.apache.hadoop.metrics2.lib.MutableHistogram
-
- snapshot() -
Method in class org.apache.hadoop.metrics2.util.MetricSampleQuantiles
- Get a snapshot of the current values of all the tracked quantiles.
- SNAPSHOT_CLONE_TIME_DESC -
Static variable in interface org.apache.hadoop.hbase.master.MetricsSnapshotSource
-
- SNAPSHOT_CLONE_TIME_NAME -
Static variable in interface org.apache.hadoop.hbase.master.MetricsSnapshotSource
-
- SNAPSHOT_DIR_NAME -
Static variable in class org.apache.hadoop.hbase.HConstants
- Name of the directory to store all snapshots.
- SNAPSHOT_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest
-
- SNAPSHOT_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse
-
- SNAPSHOT_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest
-
- SNAPSHOT_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest
-
- SNAPSHOT_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse
-
- SNAPSHOT_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest
-
- SNAPSHOT_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest
-
- SNAPSHOT_LAYOUT_CONF_KEY -
Static variable in class org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils
- Version of the fs layout for new snapshot.
- SNAPSHOT_LAYOUT_LATEST_FORMAT -
Static variable in class org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils
-
- SNAPSHOT_REQUEST_THREADS_DEFAULT -
Static variable in class org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager
- # of threads for snapshotting regions on the rs.
- SNAPSHOT_REQUEST_THREADS_KEY -
Static variable in class org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager
- Conf key for number of request threads to start snapshots on regionservers
- SNAPSHOT_REQUEST_WAKE_MILLIS_KEY -
Static variable in class org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager
- Conf key for millis between checks to see if snapshot completed or if there are errors
- SNAPSHOT_RESTORE_TIME_DESC -
Static variable in interface org.apache.hadoop.hbase.master.MetricsSnapshotSource
-
- SNAPSHOT_RESTORE_TIME_NAME -
Static variable in interface org.apache.hadoop.hbase.master.MetricsSnapshotSource
-
- SNAPSHOT_TIME_DESC -
Static variable in interface org.apache.hadoop.hbase.master.MetricsSnapshotSource
-
- SNAPSHOT_TIME_NAME -
Static variable in interface org.apache.hadoop.hbase.master.MetricsSnapshotSource
-
- SNAPSHOT_TIMEOUT_MILLIS_DEFAULT -
Static variable in class org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager
- Keep threads alive in request pool for max of 60 seconds
- SNAPSHOT_TIMEOUT_MILLIS_KEY -
Static variable in class org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager
- Conf key for max time to keep threads in snapshot request pool waiting
- SNAPSHOT_TMP_DIR_NAME -
Static variable in class org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils
- Temporary directory under the snapshot directory to store in-progress snapshots
- SnapshotCreationException - Exception in org.apache.hadoop.hbase.snapshot
- Thrown when a snapshot could not be created due to a server-side error when
taking the snapshot.
- SnapshotCreationException(String) -
Constructor for exception org.apache.hadoop.hbase.snapshot.SnapshotCreationException
- Used internally by the RPC engine to pass the exception back to the client.
- SnapshotCreationException(String, HBaseProtos.SnapshotDescription) -
Constructor for exception org.apache.hadoop.hbase.snapshot.SnapshotCreationException
- Failure to create the specified snapshot
- SnapshotCreationException(String, Throwable, HBaseProtos.SnapshotDescription) -
Constructor for exception org.apache.hadoop.hbase.snapshot.SnapshotCreationException
- Failure to create the specified snapshot due to an external cause
- SnapshotDescriptionUtils - Class in org.apache.hadoop.hbase.snapshot
- Utility class to help manage
SnapshotDesriptions. - SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter - Class in org.apache.hadoop.hbase.snapshot
- Filter that only accepts completed snapshot directories
- SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(FileSystem) -
Constructor for class org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter
-
- snapshotDisabledRegion(HRegionInfo) -
Method in class org.apache.hadoop.hbase.master.snapshot.TakeSnapshotHandler
- Take a snapshot of the specified disabled region
- SnapshotDoesNotExistException - Exception in org.apache.hadoop.hbase.snapshot
- Thrown when the server is looking for a snapshot but can't find the snapshot on the filesystem
- SnapshotDoesNotExistException(String) -
Constructor for exception org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException
-
- SnapshotDoesNotExistException(HBaseProtos.SnapshotDescription) -
Constructor for exception org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException
-
- SnapshotExistsException - Exception in org.apache.hadoop.hbase.snapshot
- Thrown when a snapshot exists but should not
- SnapshotExistsException(String) -
Constructor for exception org.apache.hadoop.hbase.snapshot.SnapshotExistsException
-
- SnapshotExistsException(String, HBaseProtos.SnapshotDescription) -
Constructor for exception org.apache.hadoop.hbase.snapshot.SnapshotExistsException
- Failure due to the snapshot already existing
- SnapshotFileCache - Class in org.apache.hadoop.hbase.master.snapshot
- Intelligently keep track of all the files for all the snapshots.
- SnapshotFileCache(Configuration, long, String, SnapshotFileCache.SnapshotFileInspector) -
Constructor for class org.apache.hadoop.hbase.master.snapshot.SnapshotFileCache
- Create a snapshot file cache for all snapshots under the specified [root]/.snapshot on the
filesystem.
- SnapshotFileCache(FileSystem, Path, long, long, String, SnapshotFileCache.SnapshotFileInspector) -
Constructor for class org.apache.hadoop.hbase.master.snapshot.SnapshotFileCache
- Create a snapshot file cache for all snapshots under the specified [root]/.snapshot on the
filesystem
- SnapshotFileCache.RefreshCacheTask - Class in org.apache.hadoop.hbase.master.snapshot
- Simple helper task that just periodically attempts to refresh the cache
- SnapshotFileCache.RefreshCacheTask() -
Constructor for class org.apache.hadoop.hbase.master.snapshot.SnapshotFileCache.RefreshCacheTask
-
- SnapshotHFileCleaner - Class in org.apache.hadoop.hbase.master.snapshot
- Implementation of a file cleaner that checks if a hfile is still used by snapshots of HBase
tables.
- SnapshotHFileCleaner() -
Constructor for class org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
-
- SnapshotInfo - Class in org.apache.hadoop.hbase.snapshot
- Tool for dumping snapshot information.
- SnapshotInfo() -
Constructor for class org.apache.hadoop.hbase.snapshot.SnapshotInfo
-
- SnapshotInfo.SnapshotStats - Class in org.apache.hadoop.hbase.snapshot
- Statistics about the snapshot
How many store files and logs are in the archive
How many store files and logs are shared with the table
Total store files and logs size and shared amount
- SNAPSHOTINFO_FILE -
Static variable in class org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils
- The file contains the snapshot basic information and it is under the directory of a snapshot.
- SnapshotLogCleaner - Class in org.apache.hadoop.hbase.master.snapshot
- Implementation of a log cleaner that checks if a log is still used by
snapshots of HBase tables.
- SnapshotLogCleaner() -
Constructor for class org.apache.hadoop.hbase.master.snapshot.SnapshotLogCleaner
-
- SnapshotManager - Class in org.apache.hadoop.hbase.master.snapshot
- This class manages the procedure of taking and restoring snapshots.
- SnapshotManager() -
Constructor for class org.apache.hadoop.hbase.master.snapshot.SnapshotManager
-
- SnapshotManager(MasterServices, MetricsMaster, ProcedureCoordinator, ExecutorService) -
Constructor for class org.apache.hadoop.hbase.master.snapshot.SnapshotManager
- Fully specify all necessary components of a snapshot manager.
- snapshotManifest -
Variable in class org.apache.hadoop.hbase.master.snapshot.TakeSnapshotHandler
-
- SnapshotManifest - Class in org.apache.hadoop.hbase.snapshot
- Utility class to help read/write the Snapshot Manifest.
- SnapshotManifestV1 - Class in org.apache.hadoop.hbase.snapshot
- DO NOT USE DIRECTLY.
- SnapshotManifestV2 - Class in org.apache.hadoop.hbase.snapshot
- DO NOT USE DIRECTLY.
- SnapshotManifestV2() -
Constructor for class org.apache.hadoop.hbase.snapshot.SnapshotManifestV2
-
- SnapshotOfRegionAssignmentFromMeta - Class in org.apache.hadoop.hbase.master
- Used internally for reading meta and constructing datastructures that are
then queried, for things like regions to regionservers, table to regions, etc.
- SnapshotOfRegionAssignmentFromMeta(CatalogTracker) -
Constructor for class org.apache.hadoop.hbase.master.SnapshotOfRegionAssignmentFromMeta
-
- SnapshotOfRegionAssignmentFromMeta(CatalogTracker, Set<TableName>, boolean) -
Constructor for class org.apache.hadoop.hbase.master.SnapshotOfRegionAssignmentFromMeta
-
- SnapshotProtos - Class in org.apache.hadoop.hbase.protobuf.generated
-
- SnapshotProtos.SnapshotDataManifest - Class in org.apache.hadoop.hbase.protobuf.generated
- Protobuf type
SnapshotDataManifest - SnapshotProtos.SnapshotDataManifest.Builder - Class in org.apache.hadoop.hbase.protobuf.generated
- Protobuf type
SnapshotDataManifest - SnapshotProtos.SnapshotDataManifestOrBuilder - Interface in org.apache.hadoop.hbase.protobuf.generated
-
- SnapshotProtos.SnapshotFileInfo - Class in org.apache.hadoop.hbase.protobuf.generated
- Protobuf type
SnapshotFileInfo - SnapshotProtos.SnapshotFileInfo.Builder - Class in org.apache.hadoop.hbase.protobuf.generated
- Protobuf type
SnapshotFileInfo - SnapshotProtos.SnapshotFileInfo.Type - Enum in org.apache.hadoop.hbase.protobuf.generated
- Protobuf enum
SnapshotFileInfo.Type - SnapshotProtos.SnapshotFileInfoOrBuilder - Interface in org.apache.hadoop.hbase.protobuf.generated
-
- SnapshotProtos.SnapshotRegionManifest - Class in org.apache.hadoop.hbase.protobuf.generated
- Protobuf type
SnapshotRegionManifest - SnapshotProtos.SnapshotRegionManifest.Builder - Class in org.apache.hadoop.hbase.protobuf.generated
- Protobuf type
SnapshotRegionManifest - SnapshotProtos.SnapshotRegionManifest.FamilyFiles - Class in org.apache.hadoop.hbase.protobuf.generated
- Protobuf type
SnapshotRegionManifest.FamilyFiles - SnapshotProtos.SnapshotRegionManifest.FamilyFiles.Builder - Class in org.apache.hadoop.hbase.protobuf.generated
- Protobuf type
SnapshotRegionManifest.FamilyFiles - SnapshotProtos.SnapshotRegionManifest.FamilyFilesOrBuilder - Interface in org.apache.hadoop.hbase.protobuf.generated
-
- SnapshotProtos.SnapshotRegionManifest.StoreFile - Class in org.apache.hadoop.hbase.protobuf.generated
- Protobuf type
SnapshotRegionManifest.StoreFile - SnapshotProtos.SnapshotRegionManifest.StoreFile.Builder - Class in org.apache.hadoop.hbase.protobuf.generated
- Protobuf type
SnapshotRegionManifest.StoreFile - SnapshotProtos.SnapshotRegionManifest.StoreFileOrBuilder - Interface in org.apache.hadoop.hbase.protobuf.generated
-
- SnapshotProtos.SnapshotRegionManifestOrBuilder - Interface in org.apache.hadoop.hbase.protobuf.generated
-
- SnapshotReferenceUtil - Class in org.apache.hadoop.hbase.snapshot
- Utility methods for interacting with the snapshot referenced files.
- SnapshotReferenceUtil.SnapshotVisitor - Interface in org.apache.hadoop.hbase.snapshot
-
- SnapshotReferenceUtil.StoreFileVisitor - Interface in org.apache.hadoop.hbase.snapshot
-
- snapshotRegions(List<Pair<HRegionInfo, ServerName>>) -
Method in class org.apache.hadoop.hbase.master.snapshot.DisabledTableSnapshotHandler
-
- snapshotRegions(List<Pair<HRegionInfo, ServerName>>) -
Method in class org.apache.hadoop.hbase.master.snapshot.EnabledTableSnapshotHandler
- This method kicks off a snapshot procedure.
- snapshotRegions(List<Pair<HRegionInfo, ServerName>>) -
Method in class org.apache.hadoop.hbase.master.snapshot.TakeSnapshotHandler
- Snapshot the specified regions
- SNAPSHOTS_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse
-
- SnapshotSentinel - Interface in org.apache.hadoop.hbase.master
- Watch the current snapshot under process
- snapshotTable -
Variable in class org.apache.hadoop.hbase.master.snapshot.TakeSnapshotHandler
-
- sniff(HConnection, TableName) -
Static method in class org.apache.hadoop.hbase.tool.Canary
- Canary entry point for specified table.
- socket -
Variable in class org.apache.hadoop.hbase.ipc.RpcClient.Connection
-
- socket -
Variable in class org.apache.hadoop.hbase.ipc.RpcServer.Connection
-
- SOCKET_RETRY_WAIT_MS -
Static variable in class org.apache.hadoop.hbase.HConstants
- The delay when re-trying a socket operation in a loop (HBASE-4712)
- SOCKET_TIMEOUT -
Static variable in class org.apache.hadoop.hbase.ipc.RpcClient
-
- socketFactory -
Variable in class org.apache.hadoop.hbase.ipc.RpcClient
-
- socketSendBufferSize -
Variable in class org.apache.hadoop.hbase.ipc.RpcServer
-
- solve() -
Method in class org.apache.hadoop.hbase.util.MunkresAssignment
- Get the optimal assignments.
- sortColumns -
Variable in class org.apache.hadoop.hbase.thrift.generated.TScan
-
- SORTED_ORDINAL_SERIALIZATION_FORMAT -
Static variable in class org.apache.hadoop.hbase.security.visibility.VisibilityConstants
- Visibility serialization version format.
- SORTED_ORDINAL_SERIALIZATION_FORMAT_TAG -
Static variable in class org.apache.hadoop.hbase.security.visibility.VisibilityUtils
-
- SORTED_ORDINAL_SERIALIZATION_FORMAT_TAG_VAL -
Static variable in class org.apache.hadoop.hbase.security.visibility.VisibilityConstants
- Byte representation of the visibility_serialization_version
- SORTED_PREFIXES_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.MultipleColumnPrefixFilter
-
- sortedColumns -
Variable in class org.apache.hadoop.hbase.thrift.generated.TRowResult
-
- SortedCopyOnWriteSet<E> - Class in org.apache.hadoop.hbase.util
- Simple
SortedSet implementation that uses an internal
TreeSet to provide ordering. - SortedCopyOnWriteSet() -
Constructor for class org.apache.hadoop.hbase.util.SortedCopyOnWriteSet
-
- SortedCopyOnWriteSet(Collection<? extends E>) -
Constructor for class org.apache.hadoop.hbase.util.SortedCopyOnWriteSet
-
- SortedCopyOnWriteSet(Comparator<? super E>) -
Constructor for class org.apache.hadoop.hbase.util.SortedCopyOnWriteSet
-
- sortedIndexByInsertionId -
Variable in class org.apache.hadoop.hbase.util.byterange.ByteRangeSet
-
- sortedIndexByUniqueIndex -
Variable in class org.apache.hadoop.hbase.util.byterange.ByteRangeSet
-
- sortedPrefixes -
Variable in class org.apache.hadoop.hbase.filter.MultipleColumnPrefixFilter
-
- sortedRanges -
Variable in class org.apache.hadoop.hbase.util.byterange.ByteRangeSet
-
- sortedUniqueValues -
Variable in class org.apache.hadoop.hbase.codec.prefixtree.encode.other.LongEncoder
-
- SOURCE_AGE_OF_LAST_SHIPPED_OP -
Static variable in interface org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSource
-
- SOURCE_AGE_OF_LAST_SHIPPED_OP -
Static variable in class org.apache.hadoop.hbase.replication.regionserver.MetricsSource
-
- SOURCE_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage
-
- SOURCE_LOG_EDITS_FILTERED -
Static variable in interface org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSource
-
- SOURCE_LOG_EDITS_FILTERED -
Static variable in class org.apache.hadoop.hbase.replication.regionserver.MetricsSource
-
- SOURCE_LOG_EDITS_READ -
Static variable in class org.apache.hadoop.hbase.replication.regionserver.MetricsSource
-
- SOURCE_LOG_READ_IN_BYTES -
Static variable in interface org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSource
-
- SOURCE_LOG_READ_IN_BYTES -
Static variable in class org.apache.hadoop.hbase.replication.regionserver.MetricsSource
-
- SOURCE_LOG_READ_IN_EDITS -
Static variable in interface org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSource
-
- SOURCE_SHIPPED_BATCHES -
Static variable in interface org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSource
-
- SOURCE_SHIPPED_BATCHES -
Static variable in class org.apache.hadoop.hbase.replication.regionserver.MetricsSource
-
- SOURCE_SHIPPED_KBS -
Static variable in interface org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSource
-
- SOURCE_SHIPPED_KBS -
Static variable in class org.apache.hadoop.hbase.replication.regionserver.MetricsSource
-
- SOURCE_SHIPPED_OPS -
Static variable in interface org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSource
-
- SOURCE_SHIPPED_OPS -
Static variable in class org.apache.hadoop.hbase.replication.regionserver.MetricsSource
-
- SOURCE_SIZE_OF_LOG_QUEUE -
Static variable in interface org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSource
-
- SOURCE_SIZE_OF_LOG_QUEUE -
Static variable in class org.apache.hadoop.hbase.replication.regionserver.MetricsSource
-
- sourceScanner -
Variable in class org.apache.hadoop.hbase.regionserver.StripeMultiFileWriter
- Source scanner that is tracking KV count; may be null if source is not StoreScanner
- sourceToString() -
Method in class org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad
- sourceToString
- SPAN_RECEIVERS_CONF_KEY -
Static variable in class org.apache.hadoop.hbase.trace.SpanReceiverHost
-
- SpanReceiverHost - Class in org.apache.hadoop.hbase.trace
- This class provides functions for reading the names of SpanReceivers from
hbase-site.xml, adding those SpanReceivers to the Tracer, and closing those
SpanReceivers when appropriate.
- SPEC_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition
-
- split(String) -
Method in class org.apache.hadoop.hbase.client.HBaseAdmin
- Split a table or an individual region.
- split(byte[]) -
Method in class org.apache.hadoop.hbase.client.HBaseAdmin
- Split a table or an individual region.
- split(String, String) -
Method in class org.apache.hadoop.hbase.client.HBaseAdmin
-
- split(byte[], byte[]) -
Method in class org.apache.hadoop.hbase.client.HBaseAdmin
- Split a table or an individual region.
- split(int, ByteRange) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenizerNode
- Called when we need to convert a leaf node into a branch with 2 leaves.
- split(AdminProtos.AdminService.BlockingInterface, HRegionInfo, byte[]) -
Static method in class org.apache.hadoop.hbase.protobuf.ProtobufUtil
- A helper to split a region using admin protocol.
- split(Path, Path, Path, FileSystem, Configuration) -
Static method in class org.apache.hadoop.hbase.regionserver.wal.HLogSplitter
-
- split(byte[], byte[], int) -
Static method in class org.apache.hadoop.hbase.util.Bytes
- Split passed range.
- split(byte[], byte[], boolean, int) -
Static method in class org.apache.hadoop.hbase.util.Bytes
- Split passed range.
- split(byte[], byte[]) -
Method in class org.apache.hadoop.hbase.util.RegionSplitter.HexStringSplit
-
- split(int) -
Method in class org.apache.hadoop.hbase.util.RegionSplitter.HexStringSplit
-
- split(byte[], byte[]) -
Method in interface org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm
- Split a pre-existing region into 2 regions.
- split(int) -
Method in interface org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm
- Split an entire table.
- split(byte[], byte[]) -
Method in class org.apache.hadoop.hbase.util.RegionSplitter.UniformSplit
-
- split(int) -
Method in class org.apache.hadoop.hbase.util.RegionSplitter.UniformSplit
-
- split2(BigInteger, BigInteger) -
Method in class org.apache.hadoop.hbase.util.RegionSplitter.HexStringSplit
- Divide 2 numbers in half (for split algorithm)
- SPLIT_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo
-
- SPLIT_KEY -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- SPLIT_KEYS_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest
-
- SPLIT_LOGDIR_NAME -
Static variable in class org.apache.hadoop.hbase.HConstants
- Used to construct the name of the splitlog directory for a region server
- SPLIT_PARTS_KEY -
Static variable in class org.apache.hadoop.hbase.regionserver.StripeStoreConfig
- The target count of new stripes to produce when splitting a stripe.
- SPLIT_POINT_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest
-
- SPLIT_POLICY -
Static variable in class org.apache.hadoop.hbase.HTableDescriptor
-
- SPLIT_PONR_VALUE -
Static variable in enum org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode
SPLIT_PONR = 5;
- SPLIT_QUEUE_LENGTH -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- SPLIT_QUEUE_LENGTH_DESC -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- SPLIT_REQUEST_DESC -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- SPLIT_REQUEST_KEY -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- SPLIT_REVERTED_VALUE -
Static variable in enum org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode
SPLIT_REVERTED = 9;
- SPLIT_SIZE_DESC -
Static variable in interface org.apache.hadoop.hbase.master.MetricsMasterFileSystemSource
-
- SPLIT_SIZE_NAME -
Static variable in interface org.apache.hadoop.hbase.master.MetricsMasterFileSystemSource
-
- SPLIT_SKIP_ERRORS_DEFAULT -
Static variable in interface org.apache.hadoop.hbase.regionserver.wal.HLog
-
- SPLIT_SUCCESS_DESC -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- SPLIT_SUCCESS_KEY -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- SPLIT_TIME_DESC -
Static variable in interface org.apache.hadoop.hbase.master.MetricsMasterFileSystemSource
-
- SPLIT_TIME_NAME -
Static variable in interface org.apache.hadoop.hbase.master.MetricsMasterFileSystemSource
-
- SPLIT_VALUE -
Static variable in enum org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State
SPLIT = 8;
- SPLIT_VALUE -
Static variable in enum org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode
SPLIT = 7;
- SPLITA_QUALIFIER -
Static variable in class org.apache.hadoop.hbase.HConstants
- The lower-half split region column qualifier
- SPLITB_QUALIFIER -
Static variable in class org.apache.hadoop.hbase.HConstants
- The upper-half split region column qualifier
- splitKerberosName(String) -
Static method in class org.apache.hadoop.hbase.security.SaslUtil
- Splitting fully qualified Kerberos name into parts
- splitkey -
Variable in class org.apache.hadoop.hbase.io.HalfStoreFileReader
-
- SPLITKEY_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference
-
- splitLog(ServerName) -
Method in class org.apache.hadoop.hbase.master.MasterFileSystem
-
- splitLog(Set<ServerName>) -
Method in class org.apache.hadoop.hbase.master.MasterFileSystem
-
- splitLog(Set<ServerName>, PathFilter) -
Method in class org.apache.hadoop.hbase.master.MasterFileSystem
- This method is the base split method that splits HLog files matching a filter.
- SplitLogCounters - Class in org.apache.hadoop.hbase
- Counters kept by the distributed WAL split log process.
- SplitLogCounters() -
Constructor for class org.apache.hadoop.hbase.SplitLogCounters
-
- splitLogDistributed(Path) -
Method in class org.apache.hadoop.hbase.master.SplitLogManager
-
- splitLogDistributed(List<Path>) -
Method in class org.apache.hadoop.hbase.master.SplitLogManager
- The caller will block until all the log files of the given region server
have been processed - successfully split or an error is encountered - by an
available worker region server.
- splitLogDistributed(Set<ServerName>, List<Path>, PathFilter) -
Method in class org.apache.hadoop.hbase.master.SplitLogManager
- The caller will block until all the hbase:meta log files of the given region server
have been processed - successfully split or an error is encountered - by an
available worker region server.
- splitLogFile(Path, FileStatus, FileSystem, Configuration, CancelableProgressable, LastSequenceId, ZooKeeperWatcher, ZooKeeperProtos.SplitLogTask.RecoveryMode) -
Static method in class org.apache.hadoop.hbase.regionserver.wal.HLogSplitter
- Splits a HLog file into region's recovered-edits directory.
- SplitLogManager - Class in org.apache.hadoop.hbase.master
- Distributes the task of log splitting to the available region servers.
- SplitLogManager(ZooKeeperWatcher, Configuration, Stoppable, MasterServices, ServerName, boolean) -
Constructor for class org.apache.hadoop.hbase.master.SplitLogManager
- Wrapper around
SplitLogManager.SplitLogManager(ZooKeeperWatcher zkw, Configuration conf,
Stoppable stopper, MasterServices master, ServerName serverName,
boolean masterRecovery, TaskFinisher tf)
that provides a task finisher for copying recovered edits to their final destination.
- SplitLogManager(ZooKeeperWatcher, Configuration, Stoppable, MasterServices, ServerName, boolean, SplitLogManager.TaskFinisher) -
Constructor for class org.apache.hadoop.hbase.master.SplitLogManager
- Its OK to construct this object even when region-servers are not online.
- SplitLogManager.TaskFinisher - Interface in org.apache.hadoop.hbase.master
SplitLogManager can use objects implementing this interface to
finish off a partially done task by SplitLogWorker.- SplitLogManager.TaskFinisher.Status - Enum in org.apache.hadoop.hbase.master
- status that can be returned finish()
- SplitLogTask - Class in org.apache.hadoop.hbase
- State of a WAL log split during distributed splitting.
- SplitLogTask.Done - Class in org.apache.hadoop.hbase
-
- SplitLogTask.Done(ServerName, ZooKeeperProtos.SplitLogTask.RecoveryMode) -
Constructor for class org.apache.hadoop.hbase.SplitLogTask.Done
-
- SplitLogTask.Err - Class in org.apache.hadoop.hbase
-
- SplitLogTask.Err(ServerName, ZooKeeperProtos.SplitLogTask.RecoveryMode) -
Constructor for class org.apache.hadoop.hbase.SplitLogTask.Err
-
- SplitLogTask.Owned - Class in org.apache.hadoop.hbase
-
- SplitLogTask.Owned(ServerName, ZooKeeperProtos.SplitLogTask.RecoveryMode) -
Constructor for class org.apache.hadoop.hbase.SplitLogTask.Owned
-
- SplitLogTask.Resigned - Class in org.apache.hadoop.hbase
-
- SplitLogTask.Resigned(ServerName, ZooKeeperProtos.SplitLogTask.RecoveryMode) -
Constructor for class org.apache.hadoop.hbase.SplitLogTask.Resigned
-
- SplitLogTask.Unassigned - Class in org.apache.hadoop.hbase
-
- SplitLogTask.Unassigned(ServerName, ZooKeeperProtos.SplitLogTask.RecoveryMode) -
Constructor for class org.apache.hadoop.hbase.SplitLogTask.Unassigned
-
- SplitLogWorker - Class in org.apache.hadoop.hbase.regionserver
- This worker is spawned in every regionserver (should we also spawn one in
the master?).
- SplitLogWorker(ZooKeeperWatcher, Configuration, RegionServerServices, SplitLogWorker.TaskExecutor) -
Constructor for class org.apache.hadoop.hbase.regionserver.SplitLogWorker
-
- SplitLogWorker(ZooKeeperWatcher, Configuration, RegionServerServices, LastSequenceId) -
Constructor for class org.apache.hadoop.hbase.regionserver.SplitLogWorker
-
- SplitLogWorker.TaskExecutor - Interface in org.apache.hadoop.hbase.regionserver
- Objects implementing this interface actually do the task that has been
acquired by a
SplitLogWorker. - SplitLogWorker.TaskExecutor.Status - Enum in org.apache.hadoop.hbase.regionserver
-
- splitLogZNode -
Variable in class org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher
-
- splitMetaLog(ServerName) -
Method in class org.apache.hadoop.hbase.master.MasterFileSystem
- Specialized method to handle the splitting for meta HLog
- splitMetaLog(Set<ServerName>) -
Method in class org.apache.hadoop.hbase.master.MasterFileSystem
- Specialized method to handle the splitting for meta HLog
- splitRegion(CatalogTracker, HRegionInfo, HRegionInfo, HRegionInfo, ServerName) -
Static method in class org.apache.hadoop.hbase.catalog.MetaEditor
- Splits the region into two in an atomic operation.
- splitRegion(RpcController, AdminProtos.SplitRegionRequest) -
Method in interface org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface
-
- splitRegion(RpcController, AdminProtos.SplitRegionRequest, RpcCallback<AdminProtos.SplitRegionResponse>) -
Method in interface org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.Interface
rpc SplitRegion(.SplitRegionRequest) returns (.SplitRegionResponse);
- splitRegion(RpcController, AdminProtos.SplitRegionRequest, RpcCallback<AdminProtos.SplitRegionResponse>) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService
rpc SplitRegion(.SplitRegionRequest) returns (.SplitRegionResponse);
- splitRegion(RpcController, AdminProtos.SplitRegionRequest, RpcCallback<AdminProtos.SplitRegionResponse>) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.Stub
-
- splitRegion(RpcController, AdminProtos.SplitRegionRequest) -
Method in class org.apache.hadoop.hbase.regionserver.HRegionServer
- Split a region on the region server.
- splitStoreFile(LoadIncrementalHFiles.LoadQueueItem, HTable, byte[], byte[]) -
Method in class org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles
-
- SPLITTING_EXT -
Static variable in interface org.apache.hadoop.hbase.regionserver.wal.HLog
- File Extension used while splitting an HLog into regions (HBASE-2312)
- SPLITTING_NEW_VALUE -
Static variable in enum org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State
SPLITTING_NEW = 13;
- SPLITTING_VALUE -
Static variable in enum org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State
SPLITTING = 7;
- SplitTransaction - Class in org.apache.hadoop.hbase.regionserver
- Executes region split as a "transaction".
- SplitTransaction(HRegion, byte[]) -
Constructor for class org.apache.hadoop.hbase.regionserver.SplitTransaction
- Constructor
- src -
Variable in class org.apache.hadoop.hbase.types.StructIterator
-
- SRC_CHECKSUM_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo
-
- STACK_TRACE_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ExceptionResponse
-
- STAMP_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState
-
- stampSet -
Variable in class org.apache.hadoop.hbase.filter.DependentColumnFilter
-
- standardSchemeReadValue(TProtocol, TField) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TMutation
-
- standardSchemeWriteValue(TProtocol) -
Method in class org.apache.hadoop.hbase.thrift2.generated.TMutation
-
- start() -
Method in class org.apache.hadoop.hbase.backup.example.TableHFileArchiveTracker
- Start monitoring for archive updates
- start() -
Method in class org.apache.hadoop.hbase.catalog.CatalogTracker
- Starts the catalog tracker.
- start(RootDoc) -
Static method in class org.apache.hadoop.hbase.classification.tools.ExcludePrivateAnnotationsStandardDoclet
-
- start(RootDoc) -
Static method in class org.apache.hadoop.hbase.classification.tools.IncludePublicAnnotationsStandardDoclet
-
- start(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.constraint.ConstraintProcessor
-
- start(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.coprocessor.AggregateImplementation
- Stores a reference to the coprocessor environment provided by the
RegionCoprocessorHost from the region where this
coprocessor is loaded.
- start(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.coprocessor.BaseMasterAndRegionObserver
-
- start(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.coprocessor.BaseMasterObserver
-
- start(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.coprocessor.BaseRegionObserver
-
- start(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.coprocessor.BaseRegionServerObserver
-
- start(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.coprocessor.BaseRowProcessorEndpoint
- Stores a reference to the coprocessor environment provided by the
RegionCoprocessorHost from the region where this
coprocessor is loaded.
- start(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.coprocessor.BaseWALObserver
-
- start(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.coprocessor.example.BulkDeleteEndpoint
-
- start(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.coprocessor.example.RowCountEndpoint
- Stores a reference to the coprocessor environment provided by the
RegionCoprocessorHost from the region where this
coprocessor is loaded.
- start(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.coprocessor.example.ZooKeeperScanPolicyObserver
-
- start(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
- Stores a reference to the coprocessor environment provided by the
RegionCoprocessorHost from the region where this
coprocessor is loaded.
- start(CoprocessorEnvironment) -
Method in interface org.apache.hadoop.hbase.Coprocessor
-
- start() -
Method in class org.apache.hadoop.hbase.errorhandling.TimeoutExceptionInjector
- Start a timer to fail a process if it takes longer than the expected time to complete.
- start() -
Method in class org.apache.hadoop.hbase.ipc.FifoRpcScheduler
-
- start(int) -
Method in class org.apache.hadoop.hbase.ipc.RpcExecutor
-
- start() -
Method in class org.apache.hadoop.hbase.ipc.RpcScheduler
- Prepares for request serving.
- start() -
Method in class org.apache.hadoop.hbase.ipc.RpcServer
- Starts the service.
- start() -
Method in interface org.apache.hadoop.hbase.ipc.RpcServerInterface
-
- start() -
Method in class org.apache.hadoop.hbase.ipc.SimpleRpcScheduler
-
- start(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.JMXListener
-
- START -
Static variable in class org.apache.hadoop.hbase.mapreduce.SimpleTotalOrderPartitioner
- Deprecated.
- start() -
Method in class org.apache.hadoop.hbase.master.TableNamespaceManager
-
- start(ProcedureCoordinator) -
Method in interface org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs
- Initialize and start threads necessary to connect an implementation's rpc mechanisms.
- start(String, ProcedureMember) -
Method in interface org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs
- Initialize and start any threads or connections the member needs.
- start() -
Method in class org.apache.hadoop.hbase.procedure.RegionServerProcedureManager
- Start accepting procedure requests.
- start() -
Method in class org.apache.hadoop.hbase.procedure.RegionServerProcedureManagerHost
-
- start(ProcedureCoordinator) -
Method in class org.apache.hadoop.hbase.procedure.ZKProcedureCoordinatorRpcs
- Start monitoring znodes in ZK - subclass hook to start monitoring znodes they are about.
- start(String, ProcedureMember) -
Method in class org.apache.hadoop.hbase.procedure.ZKProcedureMemberRpcs
-
- start(String) -
Method in interface org.apache.hadoop.hbase.regionserver.compactions.CompactionThroughputController
- Start a compaction.
- start(String) -
Method in class org.apache.hadoop.hbase.regionserver.compactions.NoLimitCompactionThroughputController
-
- start(String) -
Method in class org.apache.hadoop.hbase.regionserver.compactions.PressureAwareCompactionThroughputController
-
- start() -
Method in class org.apache.hadoop.hbase.regionserver.HeapMemoryManager
-
- start() -
Method in class org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager
- Start accepting snapshot requests.
- start() -
Method in class org.apache.hadoop.hbase.regionserver.SplitLogWorker
- start the SplitLogWorker thread
- start(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.security.access.AccessController
-
- start(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint
-
- start() -
Method in class org.apache.hadoop.hbase.security.access.ZKPermissionWatcher
-
- start() -
Method in class org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager
-
- start(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.security.token.TokenProvider
-
- start() -
Method in class org.apache.hadoop.hbase.security.token.ZKSecretWatcher
-
- start(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.security.visibility.VisibilityController
-
- start(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.security.visibility.VisibilityController.VisibilityReplication
-
- start() -
Method in class org.apache.hadoop.hbase.security.visibility.VisibilityReplicationEndpoint
-
- start() -
Method in class org.apache.hadoop.hbase.security.visibility.ZKVisibilityLabelWatcher
-
- start() -
Method in class org.apache.hadoop.hbase.util.HasThread
-
- start() -
Method in class org.apache.hadoop.hbase.util.JvmPauseMonitor
-
- start() -
Method in class org.apache.hadoop.hbase.ZKNamespaceManager
-
- start() -
Method in class org.apache.hadoop.hbase.zookeeper.DrainingServerTracker
- Starts the tracking of draining RegionServers.
- start() -
Method in class org.apache.hadoop.hbase.zookeeper.RegionServerTracker
- Starts the tracking of online RegionServers.
- start() -
Method in class org.apache.hadoop.hbase.zookeeper.ZKLeaderManager
- Deprecated.
- start() -
Method in class org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker
- Starts the tracking of the node in ZooKeeper.
- START_CODE_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName
-
- START_DATE_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp
-
- START_KEY_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo
-
- START_ROW_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan
-
- START_TIME_KEY -
Static variable in class org.apache.hadoop.hbase.mapreduce.HLogInputFormat
-
- startAndPrefix -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithPrefix_args
- the prefix (and thus start row) of the keys you want
- startAndWait() -
Method in class org.apache.hadoop.hbase.security.visibility.VisibilityReplicationEndpoint
-
- startCacheFlush(byte[]) -
Method in interface org.apache.hadoop.hbase.regionserver.wal.HLog
- WAL keeps track of the sequence numbers that were not yet flushed from memstores
in order to be able to do cleanup.
- startCancel() -
Method in class org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController
-
- startCancel() -
Method in class org.apache.hadoop.hbase.ipc.ServerRpcController
-
- startCatalogJanitorChore() -
Method in class org.apache.hadoop.hbase.master.HMaster
- Useful for testing purpose also where we have
master restart scenarios.
- STARTCODE_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node
-
- STARTCODE_QUALIFIER -
Static variable in class org.apache.hadoop.hbase.HConstants
- The startcode column qualifier
- startConnectorServer(int, int) -
Method in class org.apache.hadoop.hbase.JMXListener
-
- startDelay(boolean) -
Method in interface org.apache.hadoop.hbase.ipc.Delayable
- Signal that the call response should be delayed, thus freeing the RPC
server to handle different requests.
- startExecutorService(ExecutorType, int) -
Method in class org.apache.hadoop.hbase.executor.ExecutorService
-
- startHandlers(int) -
Method in class org.apache.hadoop.hbase.ipc.RpcExecutor
-
- startHandlers(String, int, List<BlockingQueue<CallRunner>>, int, int, int) -
Method in class org.apache.hadoop.hbase.ipc.RpcExecutor
-
- startHandlers(int) -
Method in class org.apache.hadoop.hbase.ipc.RWQueueRpcExecutor
-
- startKey -
Variable in class org.apache.hadoop.hbase.thrift.generated.TRegionInfo
-
- STARTKEY_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo.Region
-
- startOperation(long, long, Stoppable) -
Method in class org.apache.hadoop.hbase.regionserver.ServerNonceManager
- Starts the operation if operation with such nonce has not already succeeded.
- startProcedure(ForeignExceptionDispatcher, String, byte[], List<String>) -
Method in class org.apache.hadoop.hbase.procedure.ProcedureCoordinator
- Kick off the named procedure
- startRegionOperation() -
Method in class org.apache.hadoop.hbase.regionserver.HRegion
- This method needs to be called before any public call that reads or
modifies data.
- startRegionOperation(HRegion.Operation) -
Method in class org.apache.hadoop.hbase.regionserver.HRegion
-
- startRegionServer(HRegionServer) -
Static method in class org.apache.hadoop.hbase.regionserver.HRegionServer
-
- startRegionServer(HRegionServer, String) -
Static method in class org.apache.hadoop.hbase.regionserver.HRegionServer
-
- startReplicationService() -
Method in interface org.apache.hadoop.hbase.regionserver.ReplicationService
- Start replication services.
- startReplicationService() -
Method in class org.apache.hadoop.hbase.replication.regionserver.Replication
- If replication is enabled and this cluster is a master,
it starts
- startRow -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpen_args
- Starting row in table to scan.
- startRow -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenTs_args
- Starting row in table to scan.
- startRow -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStop_args
- Starting row in table to scan.
- startRow -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStopTs_args
- Starting row in table to scan.
- startRow -
Variable in class org.apache.hadoop.hbase.thrift.generated.TScan
-
- startRow -
Variable in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- STARTROW_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner
-
- startStateTracker(ZooKeeperWatcher, String) -
Method in class org.apache.hadoop.hbase.replication.ReplicationPeerZKImpl
- start a state tracker to check whether this peer is enabled or not
- startsWith(byte[], byte[]) -
Static method in class org.apache.hadoop.hbase.util.Bytes
- Return true if the byte array on the right is a prefix of the byte
array on the left.
- startTableCFsTracker(ZooKeeperWatcher, String) -
Method in class org.apache.hadoop.hbase.replication.ReplicationPeerZKImpl
- start a table-cfs tracker to listen the (table, cf-list) map change
- startThreads() -
Method in class org.apache.hadoop.hbase.ipc.RpcServer
- Starts the service threads but does not allow requests to be responded yet.
- startThreads() -
Method in interface org.apache.hadoop.hbase.ipc.RpcServerInterface
-
- STARTTIME_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.rest.protobuf.generated.ScannerMessage.Scanner
-
- startup() -
Method in class org.apache.hadoop.hbase.coprocessor.CoprocessorHost.Environment
- Initialize the environment
- startup() -
Method in class org.apache.hadoop.hbase.LocalHBaseCluster
- Start the cluster.
- startup() -
Method in class org.apache.hadoop.hbase.replication.regionserver.ReplicationSource
-
- startup() -
Method in interface org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface
- Start the replication
- startup(List<JVMClusterUtil.MasterThread>, List<JVMClusterUtil.RegionServerThread>) -
Static method in class org.apache.hadoop.hbase.util.JVMClusterUtil
- Start the cluster.
- startup(File) -
Method in class org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster
-
- startup(File, int) -
Method in class org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster
-
- startWriterThreads() -
Method in class org.apache.hadoop.hbase.io.hfile.bucket.BucketCache
- Called by the constructor to start the writer threads.
- startWriting(BlockType) -
Method in class org.apache.hadoop.hbase.io.hfile.HFileBlock.Writer
- Starts writing into the block.
- state -
Variable in enum org.apache.hadoop.hbase.security.SaslStatus
-
- state() -
Method in class org.apache.hadoop.hbase.security.visibility.VisibilityReplicationEndpoint
-
- STATE_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState
-
- STATE_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer
-
- STATE_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState
-
- STATE_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask
-
- STATE_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table
-
- STATE_QUALIFIER -
Static variable in class org.apache.hadoop.hbase.HConstants
- The state column qualifier
- StateDumpServlet - Class in org.apache.hadoop.hbase.monitoring
-
- StateDumpServlet() -
Constructor for class org.apache.hadoop.hbase.monitoring.StateDumpServlet
-
- statelessUncompressor -
Variable in class org.apache.hadoop.hbase.regionserver.wal.WALCellCodec
-
- STATIC_BLOOM_SIZE -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- STATIC_BLOOM_SIZE_DESC -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- STATIC_INDEX_SIZE -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- STATIC_INDEX_SIZE_DESC -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- StatisticsHConnection - Interface in org.apache.hadoop.hbase.client
- A server statistics tracking aware HConnection.
- STATS_RECORD_SEP -
Static variable in class org.apache.hadoop.hbase.util.ByteBloomFilter
- Record separator for the Bloom filter statistics human-readable string
- StatsTrackingRpcRetryingCaller<T> - Class in org.apache.hadoop.hbase.client
- An
RpcRetryingCaller that will update the per-region stats for the call on return,
if stats are available
- StatsTrackingRpcRetryingCaller(long, int, int, ServerStatisticTracker) -
Constructor for class org.apache.hadoop.hbase.client.StatsTrackingRpcRetryingCaller
-
- status -
Variable in class org.apache.hadoop.hbase.master.snapshot.TakeSnapshotHandler
-
- STATUS_MULTICAST_ADDRESS -
Static variable in class org.apache.hadoop.hbase.HConstants
- IP to use for the multicast status messages between the master and the clients.
- STATUS_MULTICAST_BIND_ADDRESS -
Static variable in class org.apache.hadoop.hbase.HConstants
- The address to use for binding the local socket for receiving multicast.
- STATUS_MULTICAST_PORT -
Static variable in class org.apache.hadoop.hbase.HConstants
- The port to use for the multicast messages.
- STATUS_PUBLISH_PERIOD -
Static variable in class org.apache.hadoop.hbase.master.ClusterStatusPublisher
- The minimum time between two status messages, in milliseconds.
- STATUS_PUBLISHED -
Static variable in class org.apache.hadoop.hbase.HConstants
- Setting to activate, or not, the publication of the status by the master.
- STATUS_PUBLISHED_DEFAULT -
Static variable in class org.apache.hadoop.hbase.HConstants
-
- STATUS_PUBLISHER_CLASS -
Static variable in class org.apache.hadoop.hbase.master.ClusterStatusPublisher
- The implementation class used to publish the status.
- std(TableName, ColumnInterpreter<R, S, P, Q, T>, Scan) -
Method in class org.apache.hadoop.hbase.client.coprocessor.AggregationClient
- This is the client side interface/handle for calling the std method for a
given cf-cq combination.
- std(HTable, ColumnInterpreter<R, S, P, Q, T>, Scan) -
Method in class org.apache.hadoop.hbase.client.coprocessor.AggregationClient
- This is the client side interface/handle for calling the std method for a
given cf-cq combination.
- STD_DEV_METRIC_NAME -
Static variable in class org.apache.hadoop.hbase.metrics.histogram.MetricsHistogram
- Deprecated.
- stepDownAsLeader() -
Method in class org.apache.hadoop.hbase.zookeeper.ZKLeaderManager
- Deprecated. Removes the leader znode, if it is currently claimed by this instance.
- stepsAfterPONR(Server, RegionServerServices, HRegion) -
Method in class org.apache.hadoop.hbase.regionserver.RegionMergeTransaction
-
- stepsAfterPONR(Server, RegionServerServices, PairOfSameType<HRegion>) -
Method in class org.apache.hadoop.hbase.regionserver.SplitTransaction
-
- stepsBeforePONR(Server, RegionServerServices, boolean) -
Method in class org.apache.hadoop.hbase.regionserver.RegionMergeTransaction
-
- stepsBeforePONR(Server, RegionServerServices, boolean) -
Method in class org.apache.hadoop.hbase.regionserver.SplitTransaction
-
- StochasticLoadBalancer - Class in org.apache.hadoop.hbase.master.balancer
- This is a best effort load balancer.
- StochasticLoadBalancer() -
Constructor for class org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer
-
- StochasticLoadBalancer.CostFromRegionLoadFunction - Class in org.apache.hadoop.hbase.master.balancer
- Base class the allows writing costs functions from rolling average of some
number from RegionLoad.
- StochasticLoadBalancer.CostFunction - Class in org.apache.hadoop.hbase.master.balancer
- Base class of StochasticLoadBalancer's Cost Functions.
- StochasticLoadBalancer.LoadPicker - Class in org.apache.hadoop.hbase.master.balancer
-
- StochasticLoadBalancer.LoadPicker() -
Constructor for class org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer.LoadPicker
-
- StochasticLoadBalancer.LocalityCostFunction - Class in org.apache.hadoop.hbase.master.balancer
- Compute a cost of a potential cluster configuration based upon where
StoreFiles are located.
- StochasticLoadBalancer.MemstoreSizeCostFunction - Class in org.apache.hadoop.hbase.master.balancer
- Compute the cost of total memstore size.
- StochasticLoadBalancer.MoveCostFunction - Class in org.apache.hadoop.hbase.master.balancer
- Given the starting state of the regions and a potential ending state
compute cost based upon the number of regions that have moved.
- StochasticLoadBalancer.ReadRequestCostFunction - Class in org.apache.hadoop.hbase.master.balancer
- Compute the cost of total number of read requests The more unbalanced the higher the
computed cost will be.
- StochasticLoadBalancer.RegionCountSkewCostFunction - Class in org.apache.hadoop.hbase.master.balancer
- Compute the cost of a potential cluster state from skew in number of
regions on a cluster.
- StochasticLoadBalancer.StoreFileCostFunction - Class in org.apache.hadoop.hbase.master.balancer
- Compute the cost of total open storefiles size.
- StochasticLoadBalancer.TableSkewCostFunction - Class in org.apache.hadoop.hbase.master.balancer
- Compute the cost of a potential cluster configuration based upon how evenly
distributed tables are.
- StochasticLoadBalancer.WriteRequestCostFunction - Class in org.apache.hadoop.hbase.master.balancer
- Compute the cost of total number of write requests.
- stop(String) -
Method in class org.apache.hadoop.hbase.backup.example.LongTermArchivingHFileCleaner
-
- stop() -
Method in class org.apache.hadoop.hbase.backup.example.TableHFileArchiveTracker
- Stop this tracker and the passed zookeeper
- stop() -
Method in class org.apache.hadoop.hbase.catalog.CatalogTracker
- Stop working.
- stop(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.coprocessor.AggregateImplementation
-
- stop(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.coprocessor.BaseMasterAndRegionObserver
-
- stop(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.coprocessor.BaseMasterObserver
-
- stop(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.coprocessor.BaseRegionObserver
-
- stop(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.coprocessor.BaseRegionServerObserver
-
- stop(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.coprocessor.BaseRowProcessorEndpoint
-
- stop(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.coprocessor.BaseWALObserver
-
- stop(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.coprocessor.example.BulkDeleteEndpoint
-
- stop(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.coprocessor.example.RowCountEndpoint
-
- stop(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.coprocessor.example.ZooKeeperScanPolicyObserver
-
- stop(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
-
- stop(CoprocessorEnvironment) -
Method in interface org.apache.hadoop.hbase.Coprocessor
-
- stop() -
Method in class org.apache.hadoop.hbase.ipc.FifoRpcScheduler
-
- stop() -
Method in class org.apache.hadoop.hbase.ipc.RpcClient
- Stop all threads related to this client.
- stop() -
Method in class org.apache.hadoop.hbase.ipc.RpcExecutor
-
- stop() -
Method in class org.apache.hadoop.hbase.ipc.RpcScheduler
- Stops serving new requests.
- stop() -
Method in class org.apache.hadoop.hbase.ipc.RpcServer
- Stops the service.
- stop() -
Method in interface org.apache.hadoop.hbase.ipc.RpcServerInterface
-
- stop() -
Method in class org.apache.hadoop.hbase.ipc.SimpleRpcScheduler
-
- stop(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.JMXListener
-
- stop() -
Method in class org.apache.hadoop.hbase.master.ActiveMasterManager
-
- stop() -
Method in class org.apache.hadoop.hbase.master.AssignmentManager
-
- stop(String) -
Method in class org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer
-
- stop(String) -
Method in class org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate
-
- stop(String) -
Method in class org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
-
- stop(String) -
Method in class org.apache.hadoop.hbase.master.HMaster
-
- stop() -
Method in class org.apache.hadoop.hbase.master.MasterFileSystem
-
- stop() -
Method in class org.apache.hadoop.hbase.master.ServerManager
- Stop the ServerManager.
- stop(String) -
Method in class org.apache.hadoop.hbase.master.snapshot.SnapshotFileCache
-
- stop(String) -
Method in class org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
-
- stop(String) -
Method in class org.apache.hadoop.hbase.master.snapshot.SnapshotLogCleaner
-
- stop(String) -
Method in class org.apache.hadoop.hbase.master.snapshot.SnapshotManager
-
- stop() -
Method in class org.apache.hadoop.hbase.master.SplitLogManager
-
- stop(String) -
Method in class org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost
-
- stop(boolean) -
Method in class org.apache.hadoop.hbase.procedure.RegionServerProcedureManager
- Close this and all running procedure tasks
- stop(boolean) -
Method in class org.apache.hadoop.hbase.procedure.RegionServerProcedureManagerHost
-
- stop(String) -
Method in class org.apache.hadoop.hbase.regionserver.compactions.NoLimitCompactionThroughputController
-
- stop(String) -
Method in class org.apache.hadoop.hbase.regionserver.compactions.PressureAwareCompactionThroughputController
-
- stop() -
Method in class org.apache.hadoop.hbase.regionserver.HeapMemoryManager
-
- stop(String) -
Method in class org.apache.hadoop.hbase.regionserver.HRegionServer.MovedRegionsCleaner
-
- stop(String) -
Method in class org.apache.hadoop.hbase.regionserver.HRegionServer
-
- stop(boolean) -
Method in class org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager
- Close this and all running snapshot tasks
- stop() -
Method in class org.apache.hadoop.hbase.regionserver.SplitLogWorker
- stop the SplitLogWorker thread
- stop(String) -
Method in class org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
-
- stop() -
Static method in class org.apache.hadoop.hbase.rest.RESTServlet
-
- stop(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.security.access.AccessController
-
- stop(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint
-
- stop() -
Method in class org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager
-
- stop(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.security.token.TokenProvider
-
- stop(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.security.visibility.VisibilityController
-
- stop(CoprocessorEnvironment) -
Method in class org.apache.hadoop.hbase.security.visibility.VisibilityController.VisibilityReplication
-
- stop() -
Method in class org.apache.hadoop.hbase.security.visibility.VisibilityReplicationEndpoint
-
- stop(String) -
Method in interface org.apache.hadoop.hbase.Stoppable
- Stop this service.
- stop() -
Method in class org.apache.hadoop.hbase.thrift.TBoundedThreadPoolServer
-
- stop() -
Method in class org.apache.hadoop.hbase.thrift.ThriftServer
-
- stop() -
Method in class org.apache.hadoop.hbase.util.JvmPauseMonitor
-
- stop() -
Method in class org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker
-
- stop() -
Method in class org.apache.hadoop.metrics2.lib.MetricsExecutorImpl
-
- stop() -
Method in interface org.apache.hadoop.metrics2.MetricsExecutor
-
- STOP_ROW_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan
-
- STOP_ROW_KEY_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.FilterProtos.InclusiveStopFilter
-
- STOP_TIMEOUT_CONF_KEY -
Static variable in class org.apache.hadoop.hbase.thrift.HThreadedSelectorServerArgs
- Time to wait for server to stop gracefully
- stopAndDrainOps() -
Method in class org.apache.hadoop.hbase.util.DrainBarrier
- Blocks new operations from starting, waits for the current ones to drain.
- stopAndDrainOpsOnce() -
Method in class org.apache.hadoop.hbase.util.DrainBarrier
- Blocks new operations from starting, waits for the current ones to drain.
- stopAndWait() -
Method in class org.apache.hadoop.hbase.security.visibility.VisibilityReplicationEndpoint
-
- stopConnectorServer() -
Method in class org.apache.hadoop.hbase.JMXListener
-
- stopMaster() -
Method in class org.apache.hadoop.hbase.client.HBaseAdmin
- Shuts down the current HBase master only.
- stopMaster() -
Method in class org.apache.hadoop.hbase.master.HMaster
-
- stopMaster(RpcController, MasterProtos.StopMasterRequest) -
Method in class org.apache.hadoop.hbase.master.HMaster
-
- stopMaster(RpcController, MasterProtos.StopMasterRequest) -
Method in interface org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService.BlockingInterface
-
- stopMaster(RpcController, MasterProtos.StopMasterRequest, RpcCallback<MasterProtos.StopMasterResponse>) -
Method in interface org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService.Interface
rpc StopMaster(.StopMasterRequest) returns (.StopMasterResponse);
- stopMaster(RpcController, MasterProtos.StopMasterRequest, RpcCallback<MasterProtos.StopMasterResponse>) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService
rpc StopMaster(.StopMasterRequest) returns (.StopMasterResponse);
- stopMaster(RpcController, MasterProtos.StopMasterRequest, RpcCallback<MasterProtos.StopMasterResponse>) -
Method in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService.Stub
-
- Stoppable - Interface in org.apache.hadoop.hbase
- Implementers are Stoppable.
- stopped -
Variable in class org.apache.hadoop.hbase.regionserver.HRegionServer
-
- StoppedRpcClientException - Exception in org.apache.hadoop.hbase.ipc
-
- StoppedRpcClientException() -
Constructor for exception org.apache.hadoop.hbase.ipc.StoppedRpcClientException
-
- StoppedRpcClientException(String) -
Constructor for exception org.apache.hadoop.hbase.ipc.StoppedRpcClientException
-
- stopper -
Variable in class org.apache.hadoop.hbase.Chore
-
- stopRegionServer(String) -
Method in class org.apache.hadoop.hbase.client.HBaseAdmin
- Stop the designated regionserver
- stopReplicationService() -
Method in interface org.apache.hadoop.hbase.regionserver.ReplicationService
- Stops replication service.
- stopReplicationService() -
Method in class org.apache.hadoop.hbase.replication.regionserver.Replication
- Stops replication service.
- stopReplicationSinkServices() -
Method in class org.apache.hadoop.hbase.replication.regionserver.ReplicationSink
- stop the thread pool executor.
- stopRequested -
Variable in class org.apache.hadoop.hbase.regionserver.Leases
-
- stopRow -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStop_args
- row to stop scanning on.
- stopRow -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStopTs_args
- row to stop scanning on.
- stopRow -
Variable in class org.apache.hadoop.hbase.thrift.generated.TScan
-
- stopRow -
Variable in class org.apache.hadoop.hbase.thrift2.generated.TScan
-
- stopServer(RpcController, AdminProtos.StopServerRequest) -
Method in interface org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface
-
- stopServer(RpcController, AdminProtos.StopServerRequest, RpcCallback<AdminProtos.StopServerResponse>) -
Method in interface org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.Interface
rpc StopServer(.StopServerRequest) returns (.StopServerResponse);
- stopServer(RpcController, AdminProtos.StopServerRequest, RpcCallback<AdminProtos.StopServerResponse>) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService
rpc StopServer(.StopServerRequest) returns (.StopServerResponse);
- stopServer(RpcController, AdminProtos.StopServerRequest, RpcCallback<AdminProtos.StopServerResponse>) -
Method in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.Stub
-
- stopServer(RpcController, AdminProtos.StopServerRequest) -
Method in class org.apache.hadoop.hbase.regionserver.HRegionServer
- Stop the region server.
- StorageClusterStatusMessage - Class in org.apache.hadoop.hbase.rest.protobuf.generated
-
- StorageClusterStatusMessage.StorageClusterStatus - Class in org.apache.hadoop.hbase.rest.protobuf.generated
- Protobuf type
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus
- StorageClusterStatusMessage.StorageClusterStatus.Builder - Class in org.apache.hadoop.hbase.rest.protobuf.generated
- Protobuf type
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus
- StorageClusterStatusMessage.StorageClusterStatus.Node - Class in org.apache.hadoop.hbase.rest.protobuf.generated
- Protobuf type
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node
- StorageClusterStatusMessage.StorageClusterStatus.Node.Builder - Class in org.apache.hadoop.hbase.rest.protobuf.generated
- Protobuf type
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node
- StorageClusterStatusMessage.StorageClusterStatus.NodeOrBuilder - Interface in org.apache.hadoop.hbase.rest.protobuf.generated
-
- StorageClusterStatusMessage.StorageClusterStatus.Region - Class in org.apache.hadoop.hbase.rest.protobuf.generated
- Protobuf type
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region
- StorageClusterStatusMessage.StorageClusterStatus.Region.Builder - Class in org.apache.hadoop.hbase.rest.protobuf.generated
- Protobuf type
org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region
- StorageClusterStatusMessage.StorageClusterStatus.RegionOrBuilder - Interface in org.apache.hadoop.hbase.rest.protobuf.generated
-
- StorageClusterStatusMessage.StorageClusterStatusOrBuilder - Interface in org.apache.hadoop.hbase.rest.protobuf.generated
-
- StorageClusterStatusModel - Class in org.apache.hadoop.hbase.rest.model
- Representation of the status of a storage cluster:
- StorageClusterStatusModel() -
Constructor for class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel
- Default constructor
- StorageClusterStatusModel.Node - Class in org.apache.hadoop.hbase.rest.model
- Represents a region server.
- StorageClusterStatusModel.Node() -
Constructor for class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel.Node
- Default constructor
- StorageClusterStatusModel.Node(String, long) -
Constructor for class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel.Node
- Constructor
- StorageClusterStatusModel.Node.Region - Class in org.apache.hadoop.hbase.rest.model
- Represents a region hosted on a region server.
- StorageClusterStatusModel.Node.Region() -
Constructor for class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel.Node.Region
- Default constructor
- StorageClusterStatusModel.Node.Region(byte[]) -
Constructor for class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel.Node.Region
- Constructor
- StorageClusterStatusModel.Node.Region(byte[], int, int, int, int, int, long, long, int, int, int, long, long) -
Constructor for class org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel.Node.Region
- Constructor
- StorageClusterStatusResource - Class in org.apache.hadoop.hbase.rest
-
- StorageClusterStatusResource() -
Constructor for class org.apache.hadoop.hbase.rest.StorageClusterStatusResource
- Constructor
- StorageClusterVersionModel - Class in org.apache.hadoop.hbase.rest.model
- Simple representation of the version of the storage cluster
- StorageClusterVersionModel() -
Constructor for class org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel
-
- StorageClusterVersionResource - Class in org.apache.hadoop.hbase.rest
-
- StorageClusterVersionResource() -
Constructor for class org.apache.hadoop.hbase.rest.StorageClusterVersionResource
- Constructor
- store -
Variable in class org.apache.hadoop.hbase.io.crypto.KeyStoreKeyProvider
-
- store -
Variable in class org.apache.hadoop.hbase.regionserver.compactions.Compactor
-
- Store - Interface in org.apache.hadoop.hbase.regionserver
- Interface for objects that hold a column family in a Region.
- store -
Variable in class org.apache.hadoop.hbase.regionserver.StoreScanner
-
- store(ByteRange) -
Method in class org.apache.hadoop.hbase.util.byterange.ByteRangeSet
-
- STORE_COUNT -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- STORE_COUNT_DESC -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- STORE_ENGINE_CLASS_KEY -
Static variable in class org.apache.hadoop.hbase.regionserver.StoreEngine
- The name of the configuration parameter that specifies the class of
a store engine that is used to manage and compact HBase store files.
- STORE_FILE_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse
-
- STORE_FILES_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles
-
- STORE_HOME_DIR_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor
-
- STORE_LIMIT_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get
-
- STORE_LIMIT_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan
-
- STORE_OFFSET_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get
-
- STORE_OFFSET_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan
-
- STORE_SEQUENCE_ID_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds
-
- STORE_UNCOMPRESSED_SIZE_MB_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad
-
- storeConfigInfo -
Variable in class org.apache.hadoop.hbase.regionserver.compactions.CompactionPolicy
-
- StoreConfigInformation - Interface in org.apache.hadoop.hbase.regionserver
- A more restricted interface for HStore.
- StoreEngine<SF extends org.apache.hadoop.hbase.regionserver.StoreFlusher,CP extends CompactionPolicy,C extends Compactor,SFM extends StoreFileManager> - Class in org.apache.hadoop.hbase.regionserver
- StoreEngine is a factory that can create the objects necessary for HStore to operate.
- StoreEngine() -
Constructor for class org.apache.hadoop.hbase.regionserver.StoreEngine
-
- StoreFile - Class in org.apache.hadoop.hbase.regionserver
- A Store data file.
- StoreFile(FileSystem, Path, Configuration, CacheConfig, BloomType) -
Constructor for class org.apache.hadoop.hbase.regionserver.StoreFile
- Constructor, loads a reader and it's indices, etc.
- StoreFile(FileSystem, StoreFileInfo, Configuration, CacheConfig, BloomType) -
Constructor for class org.apache.hadoop.hbase.regionserver.StoreFile
- Constructor, loads a reader and it's indices, etc.
- StoreFile(StoreFile) -
Constructor for class org.apache.hadoop.hbase.regionserver.StoreFile
- Clone
- storeFile(HRegionInfo, String, SnapshotProtos.SnapshotRegionManifest.StoreFile) -
Method in interface org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil.StoreFileVisitor
-
- storeFile(String, String, String) -
Method in interface org.apache.hadoop.hbase.util.FSVisitor.StoreFileVisitor
-
- StoreFile.Comparators - Class in org.apache.hadoop.hbase.regionserver
- Useful comparators for comparing StoreFiles.
- StoreFile.Comparators() -
Constructor for class org.apache.hadoop.hbase.regionserver.StoreFile.Comparators
-
- StoreFile.Reader - Class in org.apache.hadoop.hbase.regionserver
- Reader for a StoreFile.
- StoreFile.Reader(FileSystem, Path, CacheConfig, Configuration) -
Constructor for class org.apache.hadoop.hbase.regionserver.StoreFile.Reader
-
- StoreFile.Reader(FileSystem, Path, FSDataInputStreamWrapper, long, CacheConfig, Configuration) -
Constructor for class org.apache.hadoop.hbase.regionserver.StoreFile.Reader
-
- StoreFile.Writer - Class in org.apache.hadoop.hbase.regionserver
- A StoreFile writer.
- StoreFile.WriterBuilder - Class in org.apache.hadoop.hbase.regionserver
-
- StoreFile.WriterBuilder(Configuration, CacheConfig, FileSystem) -
Constructor for class org.apache.hadoop.hbase.regionserver.StoreFile.WriterBuilder
-
- STOREFILE_COUNT -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- STOREFILE_COUNT_DESC -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- STOREFILE_INDEX_SIZE -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- STOREFILE_INDEX_SIZE_DESC -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- STOREFILE_INDEX_SIZE_MB_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad
-
- STOREFILE_SIZE -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- STOREFILE_SIZE_DESC -
Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerSource
-
- STOREFILE_SIZE_MB_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad
-
- STOREFILEINDEXSIZEMB_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region
-
- StoreFileInfo - Class in org.apache.hadoop.hbase.regionserver
- Describe a StoreFile (hfile, reference, link)
- StoreFileInfo(Configuration, FileSystem, Path) -
Constructor for class org.apache.hadoop.hbase.regionserver.StoreFileInfo
- Create a Store File Info
- StoreFileInfo(Configuration, FileSystem, FileStatus) -
Constructor for class org.apache.hadoop.hbase.regionserver.StoreFileInfo
- Create a Store File Info
- storeFileManager -
Variable in class org.apache.hadoop.hbase.regionserver.StoreEngine
-
- StoreFileManager - Interface in org.apache.hadoop.hbase.regionserver
- Manages the store files and basic metadata about that that determines the logical structure
(e.g.
- STOREFILES_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad
-
- STOREFILES_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region
-
- StoreFileScanner - Class in org.apache.hadoop.hbase.regionserver
- KeyValueScanner adaptor over the Reader.
- StoreFileScanner(StoreFile.Reader, HFileScanner, boolean, boolean, long) -
Constructor for class org.apache.hadoop.hbase.regionserver.StoreFileScanner
- Implements a
KeyValueScanner on top of the specified HFileScanner
- STOREFILESIZEMB_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region
-
- storeFlusher -
Variable in class org.apache.hadoop.hbase.regionserver.StoreEngine
-
- storeLimit -
Variable in class org.apache.hadoop.hbase.regionserver.StoreScanner
-
- storeOffset -
Variable in class org.apache.hadoop.hbase.regionserver.StoreScanner
-
- stores -
Variable in class org.apache.hadoop.hbase.regionserver.HRegion
-
- STORES_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad
-
- STORES_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region
-
- StoreScanner - Class in org.apache.hadoop.hbase.regionserver
- Scanner scans both the memstore and the Store.
- StoreScanner(Store, boolean, Scan, NavigableSet<byte[]>, long, int, long) -
Constructor for class org.apache.hadoop.hbase.regionserver.StoreScanner
- An internal constructor.
- StoreScanner(Store, ScanInfo, Scan, NavigableSet<byte[]>, long) -
Constructor for class org.apache.hadoop.hbase.regionserver.StoreScanner
- Opens a scanner across memstore, snapshot, and all StoreFiles.
- StoreScanner(Store, ScanInfo, Scan, List<? extends KeyValueScanner>, ScanType, long, long) -
Constructor for class org.apache.hadoop.hbase.regionserver.StoreScanner
- Used for compactions.
- StoreScanner(Store, ScanInfo, Scan, List<? extends KeyValueScanner>, long, long, byte[], byte[]) -
Constructor for class org.apache.hadoop.hbase.regionserver.StoreScanner
- Used for compactions that drop deletes from a limited range of rows.
- STORESCANNER_PARALLEL_SEEK_ENABLE -
Static variable in class org.apache.hadoop.hbase.regionserver.StoreScanner
-
- StoreUtils - Class in org.apache.hadoop.hbase.regionserver
- Utility functions for region server storage layer.
- StoreUtils() -
Constructor for class org.apache.hadoop.hbase.regionserver.StoreUtils
-
- StreamUtils - Class in org.apache.hadoop.hbase.io.util
-
- StreamUtils() -
Constructor for class org.apache.hadoop.hbase.io.util.StreamUtils
-
- STRING -
Static variable in class org.apache.hadoop.hbase.util.ClassSize
- String overhead
- STRING_VIS_TAG_TYPE -
Static variable in class org.apache.hadoop.hbase.TagType
-
- stringify(Class[]) -
Static method in class org.apache.hadoop.hbase.util.Classes
-
- stringifyFilter(Filter) -
Static method in class org.apache.hadoop.hbase.rest.model.ScannerModel
-
- Strings - Class in org.apache.hadoop.hbase.util
- Utility for Strings.
- Strings() -
Constructor for class org.apache.hadoop.hbase.util.Strings
-
- STRIPE_END_KEY -
Static variable in class org.apache.hadoop.hbase.regionserver.StripeStoreFileManager
-
- STRIPE_START_KEY -
Static variable in class org.apache.hadoop.hbase.regionserver.StripeStoreFileManager
- The file metadata fields that contain the stripe information.
- StripeCompactionPolicy - Class in org.apache.hadoop.hbase.regionserver.compactions
- Stripe store implementation of compaction policy.
- StripeCompactionPolicy(Configuration, StoreConfigInformation, StripeStoreConfig) -
Constructor for class org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy
-
- StripeCompactionPolicy.StripeCompactionRequest - Class in org.apache.hadoop.hbase.regionserver.compactions
- Stripe compaction request wrapper.
- StripeCompactionPolicy.StripeCompactionRequest(CompactionRequest) -
Constructor for class org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeCompactionRequest
-
- StripeCompactionPolicy.StripeInformationProvider - Interface in org.apache.hadoop.hbase.regionserver.compactions
- The information about stripes that the policy needs to do its stuff
- StripeCompactor - Class in org.apache.hadoop.hbase.regionserver.compactions
- This is the placeholder for stripe compactor.
- StripeCompactor(Configuration, Store) -
Constructor for class org.apache.hadoop.hbase.regionserver.compactions.StripeCompactor
-
- StripeMultiFileWriter - Class in org.apache.hadoop.hbase.regionserver
- Base class for cell sink that separates the provided cells into multiple files.
- StripeMultiFileWriter() -
Constructor for class org.apache.hadoop.hbase.regionserver.StripeMultiFileWriter
-
- StripeMultiFileWriter.BoundaryMultiWriter - Class in org.apache.hadoop.hbase.regionserver
- MultiWriter that separates the cells based on fixed row-key boundaries.
- StripeMultiFileWriter.BoundaryMultiWriter(List<byte[]>, byte[], byte[]) -
Constructor for class org.apache.hadoop.hbase.regionserver.StripeMultiFileWriter.BoundaryMultiWriter
-
- StripeMultiFileWriter.SizeMultiWriter - Class in org.apache.hadoop.hbase.regionserver
- MultiWriter that separates the cells based on target cell number per file and file count.
- StripeMultiFileWriter.SizeMultiWriter(int, long, byte[], byte[]) -
Constructor for class org.apache.hadoop.hbase.regionserver.StripeMultiFileWriter.SizeMultiWriter
-
- StripeMultiFileWriter.WriterFactory - Interface in org.apache.hadoop.hbase.regionserver
-
- StripeStoreConfig - Class in org.apache.hadoop.hbase.regionserver
- Configuration class for stripe store and compactions.
- StripeStoreConfig(Configuration, StoreConfigInformation) -
Constructor for class org.apache.hadoop.hbase.regionserver.StripeStoreConfig
-
- StripeStoreEngine - Class in org.apache.hadoop.hbase.regionserver
- The storage engine that implements the stripe-based store/compaction scheme.
- StripeStoreEngine() -
Constructor for class org.apache.hadoop.hbase.regionserver.StripeStoreEngine
-
- StripeStoreFileManager - Class in org.apache.hadoop.hbase.regionserver
- Stripe implementation of StoreFileManager.
- StripeStoreFileManager(KeyValue.KVComparator, Configuration, StripeStoreConfig) -
Constructor for class org.apache.hadoop.hbase.regionserver.StripeStoreFileManager
-
- StripeStoreFlusher - Class in org.apache.hadoop.hbase.regionserver
- Stripe implementation of StoreFlusher.
- StripeStoreFlusher(Configuration, Store, StripeCompactionPolicy, StripeStoreFileManager) -
Constructor for class org.apache.hadoop.hbase.regionserver.StripeStoreFlusher
-
- StripeStoreFlusher.BoundaryStripeFlushRequest - Class in org.apache.hadoop.hbase.regionserver
- Stripe flush request wrapper based on boundaries.
- StripeStoreFlusher.BoundaryStripeFlushRequest(List<byte[]>) -
Constructor for class org.apache.hadoop.hbase.regionserver.StripeStoreFlusher.BoundaryStripeFlushRequest
-
- StripeStoreFlusher.SizeStripeFlushRequest - Class in org.apache.hadoop.hbase.regionserver
- Stripe flush request wrapper based on size.
- StripeStoreFlusher.SizeStripeFlushRequest(int, long) -
Constructor for class org.apache.hadoop.hbase.regionserver.StripeStoreFlusher.SizeStripeFlushRequest
-
- StripeStoreFlusher.StripeFlushRequest - Class in org.apache.hadoop.hbase.regionserver
- Stripe flush request wrapper that writes a non-striped file.
- StripeStoreFlusher.StripeFlushRequest() -
Constructor for class org.apache.hadoop.hbase.regionserver.StripeStoreFlusher.StripeFlushRequest
-
- strToRow(String) -
Method in class org.apache.hadoop.hbase.util.RegionSplitter.HexStringSplit
-
- strToRow(String) -
Method in interface org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm
-
- strToRow(String) -
Method in class org.apache.hadoop.hbase.util.RegionSplitter.UniformSplit
-
- Struct - Class in org.apache.hadoop.hbase.types
-
Struct is a simple DataType for implementing "compound
rowkey" and "compound qualifier" schema design strategies. - Struct(DataType[]) -
Constructor for class org.apache.hadoop.hbase.types.Struct
- Create a new
Struct instance defined as the sequence of
HDataTypes in memberTypes.
- StructBuilder - Class in org.apache.hadoop.hbase.types
- A helper for building
Struct instances. - StructBuilder() -
Constructor for class org.apache.hadoop.hbase.types.StructBuilder
- Create an empty
StructBuilder.
- StructIterator - Class in org.apache.hadoop.hbase.types
- An
Iterator over encoded Struct members. - StructIterator(PositionedByteRange, DataType[]) -
Constructor for class org.apache.hadoop.hbase.types.StructIterator
- Construct
StructIterator over the values encoded in src
using the specified types definition.
- subMap(byte[], byte[]) -
Method in class org.apache.hadoop.hbase.io.hfile.HFile.FileInfo
-
- submit(EventHandler) -
Method in class org.apache.hadoop.hbase.executor.ExecutorService
-
- submitMaxNodeDepthCandidate(int) -
Method in class org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.Tokenizer
-
- submitSubprocedure(Subprocedure) -
Method in class org.apache.hadoop.hbase.procedure.ProcedureMember
- Submit an subprocedure for execution.
- Subprocedure - Class in org.apache.hadoop.hbase.procedure
- Distributed procedure member's Subprocedure.
- Subprocedure(ProcedureMember, String, ForeignExceptionDispatcher, long, long) -
Constructor for class org.apache.hadoop.hbase.procedure.Subprocedure
-
- Subprocedure.SubprocedureImpl - Class in org.apache.hadoop.hbase.procedure
- Empty Subprocedure for testing.
- Subprocedure.SubprocedureImpl(ProcedureMember, String, ForeignExceptionDispatcher, long, long) -
Constructor for class org.apache.hadoop.hbase.procedure.Subprocedure.SubprocedureImpl
-
- SubprocedureFactory - Interface in org.apache.hadoop.hbase.procedure
- Task builder to build instances of a
ProcedureMember's Subprocedures. - subSet(KeyValue, KeyValue) -
Method in class org.apache.hadoop.hbase.regionserver.KeyValueSkipListSet
-
- subSet(KeyValue, boolean, KeyValue, boolean) -
Method in class org.apache.hadoop.hbase.regionserver.KeyValueSkipListSet
-
- subSet(E, E) -
Method in class org.apache.hadoop.hbase.util.SortedCopyOnWriteSet
-
- SUBSTR_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.SubstringComparator
-
- SubstringComparator - Class in org.apache.hadoop.hbase.filter
- This comparator is for use with SingleColumnValueFilter, for filtering based on
the value of a given column.
- SubstringComparator(String) -
Constructor for class org.apache.hadoop.hbase.filter.SubstringComparator
- Constructor
- substringType -
Static variable in class org.apache.hadoop.hbase.filter.ParseConstants
- SubstringType byte array
- success -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.atomicIncrement_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.get_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.getColumnDescriptors_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRegionInfo_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRow_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowOrBefore_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRows_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsTs_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumns_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowsWithColumnsTs_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowTs_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumns_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.getRowWithColumnsTs_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.getTableNames_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.getTableRegions_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVer_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.getVerTs_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.isTableEnabled_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerGet_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerGetList_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpen_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenTs_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithPrefix_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithScan_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStop_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift.generated.Hbase.scannerOpenWithStopTs_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.append_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndDelete_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.checkAndPut_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.deleteMultiple_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.exists_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.get_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getMultiple_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerResults_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.getScannerRows_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.increment_result
-
- success -
Variable in class org.apache.hadoop.hbase.thrift2.generated.THBaseService.openScanner_result
-
- SUCCESSFUL_DELETE_KEY -
Static variable in interface org.apache.hadoop.hbase.rest.MetricsRESTSource
-
- SUCCESSFUL_GET_KEY -
Static variable in interface org.apache.hadoop.hbase.rest.MetricsRESTSource
-
- SUCCESSFUL_PUT_KEY -
Static variable in interface org.apache.hadoop.hbase.rest.MetricsRESTSource
-
- SUCCESSFUL_SCAN_KEY -
Static variable in interface org.apache.hadoop.hbase.rest.MetricsRESTSource
-
- sum(TableName, ColumnInterpreter<R, S, P, Q, T>, Scan) -
Method in class org.apache.hadoop.hbase.client.coprocessor.AggregationClient
- It sums up the value returned from various regions.
- sum(HTable, ColumnInterpreter<R, S, P, Q, T>, Scan) -
Method in class org.apache.hadoop.hbase.client.coprocessor.AggregationClient
- It sums up the value returned from various regions.
- summarize() -
Method in interface org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter
-
- sumOfMillisSecBetweenNexts -
Variable in class org.apache.hadoop.hbase.client.metrics.ScanMetrics
- sum of milliseconds between sequential next calls
- SUPERUSER_CONF_KEY -
Static variable in class org.apache.hadoop.hbase.security.access.AccessControlLists
- Configuration key for superusers
- supportsAutoLoading() -
Method in interface org.apache.hadoop.hbase.util.BloomFilter
-
- supportsAutoLoading() -
Method in class org.apache.hadoop.hbase.util.ByteBloomFilter
-
- supportsAutoLoading() -
Method in class org.apache.hadoop.hbase.util.CompoundBloomFilter
-
- suspendEncoding() -
Method in class org.apache.hadoop.hbase.util.Base64.Base64OutputStream
- Suspends encoding of the stream.
- SWITCH_TO_SIMPLE_AUTH -
Static variable in class org.apache.hadoop.hbase.security.SaslUtil
-
- switchBalancer(boolean, HMaster.BalanceSwitchMode) -
Method in class org.apache.hadoop.hbase.master.HMaster
- Assigns balancer switch according to BalanceSwitchMode
- sync() -
Method in class org.apache.hadoop.hbase.io.hfile.bucket.ByteBufferIOEngine
- No operation for the sync in the memory IO engine
- sync() -
Method in class org.apache.hadoop.hbase.io.hfile.bucket.FileIOEngine
- Sync the data to file after writing
- sync() -
Method in interface org.apache.hadoop.hbase.io.hfile.bucket.IOEngine
- Sync the data to IOEngine after writing
- sync() -
Method in interface org.apache.hadoop.hbase.regionserver.wal.HLog
-
- sync(long) -
Method in interface org.apache.hadoop.hbase.regionserver.wal.HLog
-
- sync() -
Method in interface org.apache.hadoop.hbase.regionserver.wal.HLog.Writer
-
- sync() -
Method in class org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter
-
- sync(String, AsyncCallback.VoidCallback, Object) -
Method in class org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper
-
- sync(String) -
Method in class org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher
- Forces a synchronization of this ZooKeeper client connection.
- SYNC_TIME -
Static variable in interface org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource
-
- SYNC_TIME_DESC -
Static variable in interface org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource
-
- SYNC_WAL_VALUE -
Static variable in enum org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Durability
SYNC_WAL = 3;
- SYNCHRONOUS_FIELD_NUMBER -
Static variable in class org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest
-
- synchronousBalanceSwitch(boolean) -
Method in class org.apache.hadoop.hbase.master.HMaster
-
- SYSTEM_LABEL -
Static variable in class org.apache.hadoop.hbase.security.visibility.VisibilityUtils
-
- SYSTEM_NAMESPACE -
Static variable in class org.apache.hadoop.hbase.NamespaceDescriptor
-
- SYSTEM_NAMESPACE_NAME -
Static variable in class org.apache.hadoop.hbase.NamespaceDescriptor
- System namespace name.
- SYSTEM_NAMESPACE_NAME_STR -
Static variable in class org.apache.hadoop.hbase.NamespaceDescriptor
-
- SystemTableWALEntryFilter - Class in org.apache.hadoop.hbase.replication
- Skips WAL edits for all System tables including META
- SystemTableWALEntryFilter() -
Constructor for class org.apache.hadoop.hbase.replication.SystemTableWALEntryFilter
-
Table = 3;
org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoorg.apache.hadoop.hbase.rest.protobuf.generated.TableInfoorg.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.Regionorg.apache.hadoop.hbase.rest.protobuf.generated.TableInfo.RegionTableInputFormats.org.apache.hadoop.hbase.rest.protobuf.generated.TableListorg.apache.hadoop.hbase.rest.protobuf.generated.TableListMapper class to add the required input key
and value classes.TableMapper and TableReducerZKNamespaceManagerReducer class to add the required key and
value input/output classes.org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaorg.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attributeorg.apache.hadoop.hbase.rest.protobuf.generated.TableSchema.Attributeorg.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaTableSplit.TableSplit(TableName, byte[], byte[], String)
TableSplit.TableSplit(TableName, byte[], byte[], String)
Tag.Tag(byte[], int, int)
length.
DataType implementation as a terminated
version of itself.wrapped.
wrapped.
term begins within src,
or -1 if term is not found.
Callable.call() throws an exception and we are going to retry; take action to
make it so we succeed on next call (clear caches, do relookup of locations, etc.).
Bytes.toBytes(boolean)
IOException.
buf,
from the index 0 (inclusive) to the limit (exclusive),
regardless of the current position.
Bytes.SIZEOF_SHORT bytes long.
HRegionInfo.toByteArray() when writing to a stream and you want to use
the pb mergeDelimitedFrom (w/o the delimiter, pb reads to EOF which may not be what you want).
TokenizerNode
connects the TokenizerNodes via standard java references
keeps a pool of TokenizerNodes and a reusable byte[] for holding all token contentAuthenticationProtos AuthenticationService coprocessor service.TOP = 0;
Struct represented by this.
RPCTInfoRPCTInforpc truncateTable(.TruncateTableRequest) returns (.TruncateTableResponse);
rpc truncateTable(.TruncateTableRequest) returns (.TruncateTableResponse);
LoadIncrementalHFiles.tryAtomicRegionLoad(HConnection, TableName, byte[], Collection)
UNASSIGNED = 0;
rpc UnassignRegion(.UnassignRegionRequest) returns (.UnassignRegionResponse);
rpc UnassignRegion(.UnassignRegionRequest) returns (.UnassignRegionResponse);
Union family of DataTypes encode one of a fixed
set of Objects.Union2 over the set of specified
types.
Union family of DataTypes encode one of a fixed
collection of Objects.Union3 over the set of specified
types.
Union family of DataTypes encode one of a fixed
collection of Objects.Union4 over the set of specified
types.
UNKNOWN = 0;
EventType
StoreFileManager.getCandidateFilesForRowKeyBefore(KeyValue) and
StoreFileManager.updateCandidateFilesForRowKeyBefore(Iterator, KeyValue, KeyValue)
for details on this methods.
HFileBlock.
rpc UpdateFavoredNodes(.UpdateFavoredNodesRequest) returns (.UpdateFavoredNodesResponse);
rpc UpdateFavoredNodes(.UpdateFavoredNodesRequest) returns (.UpdateFavoredNodesResponse);
LruCachedBlock
ResultStatsUtil
USE_DEFAULT = 0;
hbase:meta is deployed and accessible.
VERSION = 3;
org.apache.hadoop.hbase.rest.protobuf.generated.Versionorg.apache.hadoop.hbase.rest.protobuf.generated.VersionVisibilityLabelServiceGetAuthsRequestGetAuthsRequestGetAuthsResponseGetAuthsResponseListLabelsRequestListLabelsRequestListLabelsResponseListLabelsResponseMultiUserAuthorizationsMultiUserAuthorizationsSetAuthsRequestSetAuthsRequestUserAuthorizationsUserAuthorizationsVisibilityLabelVisibilityLabelVisibilityLabelsRequestVisibilityLabelsRequestVisibilityLabelsResponseVisibilityLabelsResponseVisibilityLabelsServiceInterProcessLock.MetadataHandler.
hbase:meta if available and waits
for up to the specified timeout if not immediately available.
hbase:meta.
MultiTableOutputFormat.WAL_OFF to turn off write-ahead logging (HLog)
WAL = 2;
FSHLog/WAL log events.Configuration.
FSHLog.CompactionDescriptorCompactionDescriptorFamilyScopeFamilyScopeScopeTypeWALHeaderWALHeaderWALKeyWALKeyWALTrailerWALTrailerWhileMatchFilter.filterAllRemaining() as soon
as the wrapped filters Filter.filterRowKey(byte[], int, int),
Filter.filterKeyValue(org.apache.hadoop.hbase.Cell),
Filter.filterRow() or
Filter.filterAllRemaining() methods
returns true.rpc WhoAmI(.WhoAmIRequest) returns (.WhoAmIResponse);
rpc WhoAmI(.WhoAmIRequest) returns (.WhoAmIResponse);
StoreFile.WriterBuilder.withOutputDir(org.apache.hadoop.fs.Path), but not both.
StoreFile.WriterBuilder.withFilePath(org.apache.hadoop.fs.Path), but not both.
ByteString without copying it.
ByteString without copying it.
ByteString without copying it.
ByteString without copying it.
HColumnDescriptor.toByteArray() instead.
HRegionInfo.toByteArray() and
HRegionInfo.toDelimitedByteArray()
MessageLite.toByteArray() instead.
Base64.Base64OutputStream.write(int) repeatedly until len bytes are
written.
WRITE = 1;
HFileBlock.BlockWritable instance, creates a new block of
its appropriate type, writes the writable into this block, and flushes
the block into the output stream.
HFileBlock.Writer.writeHeaderAndData(FSDataOutputStream), but records
the offset of this block so that it can be referenced in the next block
of the same type.
Writable instances
and returns the resulting byte array.
WritableUtils.writeVLong(java.io.DataOutput, long),
but writes to a ByteBuffer.
XOR = 3;
ByteString without copy.
InterProcessReadWriteLock.ProcedureCoordinatorRpcs for a ProcedureCoordinatorZKProcedureMemberRpcs.start(String, ProcedureMember) before this can be used.
SplitLogManager and SplitLogWorker
running distributed splitting of WAL logs.ClusterUpClusterUpMasterMasterMetaRegionServerMetaRegionServerRegionStoreSequenceIdsRegionStoreSequenceIdsRegionTransitionRegionTransitionReplicationHLogPositionReplicationHLogPositionReplicationLockReplicationLockReplicationPeerReplicationPeerReplicationStateReplicationStateReplicationState.StateSplitLogTaskSplitLogTaskSplitLogTask.RecoveryModeSplitLogTask.StateStoreSequenceIdStoreSequenceIdTableTableTable.StateTableLockTableLock
|
||||||||||
| PREV NEXT | FRAMES NO FRAMES | |||||||||